author      Ansible Core Team <info@ansible.com>    2020-03-09 09:40:37 +0000
committer   Ansible Core Team <info@ansible.com>    2020-03-09 09:40:37 +0000
commit      b182f411ea18f84e274096efc3288dfc7c9887f5 (patch)
tree        386049f500b76e03430a9e8093e7e838da4966fd
parent      b33dfe795861f249b9d5c9604f3543eeb1a0fd51 (diff)
download    ansible-b182f411ea18f84e274096efc3288dfc7c9887f5.tar.gz
Migrated to azure.azcollection
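The Azure content removed by this commit now ships in the azure.azcollection collection on Ansible Galaxy. A minimal sketch of declaring that dependency follows; the requirements file path is an illustrative convention, not part of this commit, and the collection can also be installed directly with ansible-galaxy collection install azure.azcollection.

    # collections/requirements.yml (illustrative path)
    collections:
      - name: azure.azcollection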
-rw-r--r--  lib/ansible/module_utils/azure_rm_common.py  1473
-rw-r--r--  lib/ansible/module_utils/azure_rm_common_ext.py  211
-rw-r--r--  lib/ansible/module_utils/azure_rm_common_rest.py  97
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_acs.py  745
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_aks.py  841
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_aks_info.py  191
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_aksversion_info.py  133
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_appgateway.py  1009
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_applicationsecuritygroup.py  250
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_applicationsecuritygroup_info.py  229
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_appserviceplan.py  379
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_appserviceplan_info.py  241
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_automationaccount.py  174
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_automationaccount_info.py  383
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_autoscale.py  649
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_autoscale_info.py  271
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_availabilityset.py  346
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_availabilityset_info.py  216
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_azurefirewall.py  729
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_azurefirewall_info.py  275
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_batchaccount.py  341
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint.py  666
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint_info.py  315
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_cdnprofile.py  304
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_cdnprofile_info.py  268
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_containerinstance.py  529
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_containerinstance_info.py  320
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_containerregistry.py  411
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_containerregistry_info.py  283
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount.py  587
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount_info.py  520
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_deployment.py  702
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_deployment_info.py  249
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlab.py  284
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlab_info.py  272
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlabarmtemplate_info.py  226
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifact_info.py  250
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifactsource.py  365
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifactsource_info.py  258
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlabcustomimage.py  383
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlabcustomimage_info.py  229
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlabenvironment.py  379
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlabenvironment_info.py  245
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlabpolicy.py  401
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlabpolicy_info.py  243
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule.py  341
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule_info.py  222
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine.py  544
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine_info.py  329
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualnetwork.py  293
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualnetwork_info.py  221
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset.py  485
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset_info.py  294
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_dnszone.py  302
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_dnszone_info.py  258
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_functionapp.py  421
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_functionapp_info.py  206
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_gallery.py  308
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_gallery_info.py  263
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_galleryimage.py  544
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_galleryimage_info.py  274
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion.py  629
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion_info.py  270
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_hdinsightcluster.py  555
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_hdinsightcluster_info.py  321
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_image.py  370
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_image_info.py  307
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_iotdevice.py  472
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_iotdevice_info.py  313
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_iotdevicemodule.py  378
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_iothub.py  895
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_iothub_info.py  618
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_iothubconsumergroup.py  169
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_keyvault.py  504
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_keyvault_info.py  323
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_keyvaultkey.py  310
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_keyvaultkey_info.py  466
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_keyvaultsecret.py  231
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_loadbalancer.py  1042
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_loadbalancer_info.py  177
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_lock.py  216
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_lock_info.py  223
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_loganalyticsworkspace.py  321
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_loganalyticsworkspace_info.py  269
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_manageddisk.py  493
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_manageddisk_info.py  243
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_mariadbconfiguration.py  241
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_mariadbconfiguration_info.py  216
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_mariadbdatabase.py  304
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_mariadbdatabase_info.py  211
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_mariadbfirewallrule.py  277
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_mariadbfirewallrule_info.py  207
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_mariadbserver.py  388
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_mariadbserver_info.py  264
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_monitorlogprofile.py  392
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_mysqlconfiguration.py  240
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_mysqlconfiguration_info.py  214
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_mysqldatabase.py  302
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_mysqldatabase_info.py  209
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_mysqlfirewallrule.py  277
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_mysqlfirewallrule_info.py  205
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_mysqlserver.py  386
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_mysqlserver_info.py  262
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py  877
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_networkinterface_info.py  354
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_postgresqlconfiguration.py  240
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_postgresqlconfiguration_info.py  217
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_postgresqldatabase.py  303
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_postgresqldatabase_info.py  209
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_postgresqlfirewallrule.py  275
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_postgresqlfirewallrule_info.py  205
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_postgresqlserver.py  387
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_postgresqlserver_info.py  263
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_publicipaddress.py  426
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_publicipaddress_info.py  321
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_rediscache.py  779
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_rediscache_info.py  354
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_rediscachefirewallrule.py  320
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_resource.py  427
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_resource_info.py  431
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_resourcegroup.py  291
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_resourcegroup_info.py  240
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_roleassignment.py  283
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_roleassignment_info.py  280
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_roledefinition.py  402
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_roledefinition_info.py  310
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_route.py  220
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_routetable.py  195
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_routetable_info.py  219
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py  817
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_securitygroup_info.py  320
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_servicebus.py  207
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_servicebus_info.py  584
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_servicebusqueue.py  339
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_servicebussaspolicy.py  328
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_servicebustopic.py  302
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_servicebustopicsubscription.py  320
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_snapshot.py  391
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_sqldatabase.py  514
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_sqldatabase_info.py  288
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule.py  273
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule_info.py  215
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_sqlserver.py  320
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_sqlserver_info.py  207
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_storageaccount.py  684
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_storageaccount_info.py  557
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_storageblob.py  548
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_subnet.py  399
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_subnet_info.py  246
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerendpoint.py  374
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerendpoint_info.py  311
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile.py  464
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile_info.py  422
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py  2203
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualmachine_info.py  456
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualmachineextension.py  339
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualmachineextension_info.py  248
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualmachineimage_info.py  258
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset.py  1254
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset_info.py  437
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetextension.py  301
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetextension_info.py  228
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetinstance.py  325
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetinstance_info.py  243
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py  394
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_info.py  338
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkgateway.py  383
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkpeering.py  414
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkpeering_info.py  256
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_webapp.py  1070
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_webapp_info.py  488
-rw-r--r--  lib/ansible/modules/cloud/azure/azure_rm_webappslot.py  1058
-rw-r--r--  lib/ansible/plugins/doc_fragments/azure.py  113
-rw-r--r--  lib/ansible/plugins/doc_fragments/azure_tags.py  25
-rw-r--r--  lib/ansible/plugins/inventory/azure_rm.py  645
-rw-r--r--  test/integration/targets/azure_rm_acs/aliases  3
-rw-r--r--  test/integration/targets/azure_rm_acs/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_acs/tasks/main.yml  149
-rw-r--r--  test/integration/targets/azure_rm_aks/aliases  3
-rw-r--r--  test/integration/targets/azure_rm_aks/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_aks/tasks/main.yml  213
-rw-r--r--  test/integration/targets/azure_rm_appgateway/aliases  3
-rw-r--r--  test/integration/targets/azure_rm_appgateway/files/cert1.txt  1
-rw-r--r--  test/integration/targets/azure_rm_appgateway/files/cert2.txt  1
-rw-r--r--  test/integration/targets/azure_rm_appgateway/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_appgateway/tasks/main.yml  401
-rw-r--r--  test/integration/targets/azure_rm_appserviceplan/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_appserviceplan/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_appserviceplan/tasks/main.yml  84
-rw-r--r--  test/integration/targets/azure_rm_automationaccount/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_automationaccount/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_automationaccount/tasks/main.yml  88
-rw-r--r--  test/integration/targets/azure_rm_autoscale/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_autoscale/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_autoscale/tasks/main.yml  219
-rw-r--r--  test/integration/targets/azure_rm_availabilityset/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_availabilityset/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_availabilityset/tasks/main.yml  136
-rw-r--r--  test/integration/targets/azure_rm_azurefirewall/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_azurefirewall/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_azurefirewall/tasks/main.yml  277
-rw-r--r--  test/integration/targets/azure_rm_batchaccount/aliases  3
-rw-r--r--  test/integration/targets/azure_rm_batchaccount/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_batchaccount/tasks/main.yml  76
-rw-r--r--  test/integration/targets/azure_rm_cdnprofile/aliases  5
-rw-r--r--  test/integration/targets/azure_rm_cdnprofile/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_cdnprofile/tasks/main.yml  276
-rw-r--r--  test/integration/targets/azure_rm_containerinstance/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_containerinstance/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_containerinstance/tasks/main.yml  214
-rw-r--r--  test/integration/targets/azure_rm_containerregistry/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_containerregistry/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_containerregistry/tasks/main.yml  116
-rw-r--r--  test/integration/targets/azure_rm_cosmosdbaccount/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_cosmosdbaccount/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_cosmosdbaccount/tasks/main.yml  249
-rw-r--r--  test/integration/targets/azure_rm_deployment/aliases  3
-rw-r--r--  test/integration/targets/azure_rm_deployment/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_deployment/tasks/main.yml  45
-rw-r--r--  test/integration/targets/azure_rm_devtestlab/aliases  17
-rw-r--r--  test/integration/targets/azure_rm_devtestlab/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_devtestlab/tasks/main.yml  766
-rw-r--r--  test/integration/targets/azure_rm_dnszone/aliases  6
-rw-r--r--  test/integration/targets/azure_rm_dnszone/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_dnszone/tasks/main.yml  355
-rw-r--r--  test/integration/targets/azure_rm_functionapp/aliases  3
-rw-r--r--  test/integration/targets/azure_rm_functionapp/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_functionapp/tasks/main.yml  131
-rw-r--r--  test/integration/targets/azure_rm_gallery/aliases  6
-rw-r--r--  test/integration/targets/azure_rm_gallery/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_gallery/tasks/main.yml  342
-rw-r--r--  test/integration/targets/azure_rm_hdinsightcluster/aliases  6
-rw-r--r--  test/integration/targets/azure_rm_hdinsightcluster/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_hdinsightcluster/tasks/main.yml  244
-rw-r--r--  test/integration/targets/azure_rm_image/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_image/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_image/tasks/main.yml  171
-rw-r--r--  test/integration/targets/azure_rm_iothub/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_iothub/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_iothub/tasks/main.yml  172
-rw-r--r--  test/integration/targets/azure_rm_keyvault/aliases  5
-rw-r--r--  test/integration/targets/azure_rm_keyvault/lookup_plugins/azure_service_principal_attribute.py  94
-rw-r--r--  test/integration/targets/azure_rm_keyvault/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_keyvault/tasks/main.yml  270
-rw-r--r--  test/integration/targets/azure_rm_loadbalancer/aliases  3
-rw-r--r--  test/integration/targets/azure_rm_loadbalancer/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_loadbalancer/tasks/main.yml  298
-rw-r--r--  test/integration/targets/azure_rm_lock/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_lock/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_lock/tasks/main.yml  99
-rw-r--r--  test/integration/targets/azure_rm_loganalyticsworkspace/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_loganalyticsworkspace/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_loganalyticsworkspace/tasks/main.yml  128
-rw-r--r--  test/integration/targets/azure_rm_manageddisk/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_manageddisk/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_manageddisk/tasks/main.yml  204
-rw-r--r--  test/integration/targets/azure_rm_mariadbserver/aliases  8
-rw-r--r--  test/integration/targets/azure_rm_mariadbserver/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_mariadbserver/tasks/main.yml  640
-rw-r--r--  test/integration/targets/azure_rm_monitorlogprofile/aliases  3
-rw-r--r--  test/integration/targets/azure_rm_monitorlogprofile/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_monitorlogprofile/tasks/main.yml  104
-rw-r--r--  test/integration/targets/azure_rm_mysqlserver/aliases  10
-rw-r--r--  test/integration/targets/azure_rm_mysqlserver/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_mysqlserver/tasks/main.yml  640
-rw-r--r--  test/integration/targets/azure_rm_networkinterface/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_networkinterface/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_networkinterface/tasks/main.yml  554
-rw-r--r--  test/integration/targets/azure_rm_postgresqlserver/aliases  10
-rw-r--r--  test/integration/targets/azure_rm_postgresqlserver/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_postgresqlserver/tasks/main.yml  610
-rw-r--r--  test/integration/targets/azure_rm_publicipaddress/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_publicipaddress/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_publicipaddress/tasks/main.yml  113
-rw-r--r--  test/integration/targets/azure_rm_rediscache/aliases  5
-rw-r--r--  test/integration/targets/azure_rm_rediscache/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_rediscache/tasks/main.yml  317
-rw-r--r--  test/integration/targets/azure_rm_resource/aliases  3
-rw-r--r--  test/integration/targets/azure_rm_resource/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_resource/tasks/main.yml  158
-rw-r--r--  test/integration/targets/azure_rm_resourcegroup/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_resourcegroup/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_resourcegroup/tasks/main.yml  39
-rw-r--r--  test/integration/targets/azure_rm_roledefinition/aliases  3
-rw-r--r--  test/integration/targets/azure_rm_roledefinition/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_roledefinition/tasks/main.yml  207
-rw-r--r--  test/integration/targets/azure_rm_routetable/aliases  3
-rw-r--r--  test/integration/targets/azure_rm_routetable/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_routetable/tasks/main.yml  183
-rw-r--r--  test/integration/targets/azure_rm_securitygroup/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_securitygroup/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_securitygroup/tasks/main.yml  302
-rw-r--r--  test/integration/targets/azure_rm_servicebus/aliases  3
-rw-r--r--  test/integration/targets/azure_rm_servicebus/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_servicebus/tasks/main.yml  169
-rw-r--r--  test/integration/targets/azure_rm_sqlserver/aliases  8
-rw-r--r--  test/integration/targets/azure_rm_sqlserver/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_sqlserver/tasks/main.yml  419
-rw-r--r--  test/integration/targets/azure_rm_storageaccount/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_storageaccount/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_storageaccount/tasks/main.yml  158
-rw-r--r--  test/integration/targets/azure_rm_storageblob/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_storageblob/files/Ratings.png  bin 35164 -> 0 bytes
-rw-r--r--  test/integration/targets/azure_rm_storageblob/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_storageblob/tasks/main.yml  119
-rw-r--r--  test/integration/targets/azure_rm_subnet/aliases  3
-rw-r--r--  test/integration/targets/azure_rm_subnet/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_subnet/tasks/main.yml  182
-rw-r--r--  test/integration/targets/azure_rm_trafficmanagerprofile/aliases  3
-rw-r--r--  test/integration/targets/azure_rm_trafficmanagerprofile/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_trafficmanagerprofile/tasks/main.yml  289
-rw-r--r--  test/integration/targets/azure_rm_virtualmachine/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_virtualmachine/inventory.yml  66
-rw-r--r--  test/integration/targets/azure_rm_virtualmachine/main.yml  7
-rwxr-xr-x  test/integration/targets/azure_rm_virtualmachine/runme.sh  5
-rw-r--r--  test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_deallocate.yml  90
-rw-r--r--  test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_dual_nic.yml  131
-rw-r--r--  test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_invalid.yml  35
-rw-r--r--  test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_minimal.yml  118
-rw-r--r--  test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_no_public_ip.yml  41
-rw-r--r--  test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_public_ip.yml  311
-rw-r--r--  test/integration/targets/azure_rm_virtualmachine/tasks/setup.yml  26
-rw-r--r--  test/integration/targets/azure_rm_virtualmachineextension/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_virtualmachineextension/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_virtualmachineextension/tasks/main.yml  181
-rw-r--r--  test/integration/targets/azure_rm_virtualmachineimage_info/aliases  4
-rw-r--r--  test/integration/targets/azure_rm_virtualmachineimage_info/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_virtualmachineimage_info/tasks/main.yml  43
-rw-r--r--  test/integration/targets/azure_rm_virtualmachinescaleset/aliases  7
-rw-r--r--  test/integration/targets/azure_rm_virtualmachinescaleset/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_virtualmachinescaleset/tasks/main.yml  617
-rw-r--r--  test/integration/targets/azure_rm_virtualnetwork/aliases  3
-rw-r--r--  test/integration/targets/azure_rm_virtualnetwork/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_virtualnetwork/tasks/main.yml  181
-rw-r--r--  test/integration/targets/azure_rm_virtualnetworkgateway/aliases  3
-rw-r--r--  test/integration/targets/azure_rm_virtualnetworkgateway/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_virtualnetworkgateway/tasks/main.yml  126
-rw-r--r--  test/integration/targets/azure_rm_virtualnetworkpeering/aliases  3
-rw-r--r--  test/integration/targets/azure_rm_virtualnetworkpeering/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_virtualnetworkpeering/tasks/main.yml  126
-rw-r--r--  test/integration/targets/azure_rm_webapp/aliases  5
-rw-r--r--  test/integration/targets/azure_rm_webapp/meta/main.yml  2
-rw-r--r--  test/integration/targets/azure_rm_webapp/tasks/main.yml  434
-rw-r--r--  test/sanity/ignore.txt  665
344 files changed, 0 insertions, 82185 deletions
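Playbooks that referenced the modules listed above by their short names can keep working by calling them through the collection's fully qualified names once it is installed. A minimal sketch, using azure_rm_resourcegroup from the list above (the resource group name and location values are illustrative):

    - hosts: localhost
      connection: local
      tasks:
        - name: Ensure a resource group exists via azure.azcollection
          azure.azcollection.azure_rm_resourcegroup:
            name: myResourceGroup
            location: eastus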
diff --git a/lib/ansible/module_utils/azure_rm_common.py b/lib/ansible/module_utils/azure_rm_common.py
deleted file mode 100644
index e995daa02e..0000000000
--- a/lib/ansible/module_utils/azure_rm_common.py
+++ /dev/null
@@ -1,1473 +0,0 @@
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-import os
-import re
-import types
-import copy
-import inspect
-import traceback
-import json
-
-from os.path import expanduser
-
-from ansible.module_utils.basic import AnsibleModule, missing_required_lib
-try:
- from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
-except Exception:
- ANSIBLE_VERSION = 'unknown'
-from ansible.module_utils.six.moves import configparser
-import ansible.module_utils.six.moves.urllib.parse as urlparse
-
-AZURE_COMMON_ARGS = dict(
- auth_source=dict(
- type='str',
- choices=['auto', 'cli', 'env', 'credential_file', 'msi']
- ),
- profile=dict(type='str'),
- subscription_id=dict(type='str'),
- client_id=dict(type='str', no_log=True),
- secret=dict(type='str', no_log=True),
- tenant=dict(type='str', no_log=True),
- ad_user=dict(type='str', no_log=True),
- password=dict(type='str', no_log=True),
- cloud_environment=dict(type='str', default='AzureCloud'),
- cert_validation_mode=dict(type='str', choices=['validate', 'ignore']),
- api_profile=dict(type='str', default='latest'),
- adfs_authority_url=dict(type='str', default=None)
-)
-
-AZURE_CREDENTIAL_ENV_MAPPING = dict(
- profile='AZURE_PROFILE',
- subscription_id='AZURE_SUBSCRIPTION_ID',
- client_id='AZURE_CLIENT_ID',
- secret='AZURE_SECRET',
- tenant='AZURE_TENANT',
- ad_user='AZURE_AD_USER',
- password='AZURE_PASSWORD',
- cloud_environment='AZURE_CLOUD_ENVIRONMENT',
- cert_validation_mode='AZURE_CERT_VALIDATION_MODE',
- adfs_authority_url='AZURE_ADFS_AUTHORITY_URL'
-)
-
-
-class SDKProfile(object): # pylint: disable=too-few-public-methods
-
- def __init__(self, default_api_version, profile=None):
- """Constructor.
-
- :param str default_api_version: Default API version if not overridden by a profile. Nullable.
- :param profile: A dict mapping operation group name to API version.
- :type profile: dict[str, str]
- """
- self.profile = profile if profile is not None else {}
- self.profile[None] = default_api_version
-
- @property
- def default_api_version(self):
- return self.profile[None]
-
-
-# FUTURE: this should come from the SDK or an external location.
-# For now, we have to copy from azure-cli
-AZURE_API_PROFILES = {
- 'latest': {
- 'ContainerInstanceManagementClient': '2018-02-01-preview',
- 'ComputeManagementClient': dict(
- default_api_version='2018-10-01',
- resource_skus='2018-10-01',
- disks='2018-06-01',
- snapshots='2018-10-01',
- virtual_machine_run_commands='2018-10-01'
- ),
- 'NetworkManagementClient': '2018-08-01',
- 'ResourceManagementClient': '2017-05-10',
- 'StorageManagementClient': '2017-10-01',
- 'WebSiteManagementClient': '2018-02-01',
- 'PostgreSQLManagementClient': '2017-12-01',
- 'MySQLManagementClient': '2017-12-01',
- 'MariaDBManagementClient': '2019-03-01',
- 'ManagementLockClient': '2016-09-01'
- },
- '2019-03-01-hybrid': {
- 'StorageManagementClient': '2017-10-01',
- 'NetworkManagementClient': '2017-10-01',
- 'ComputeManagementClient': SDKProfile('2017-12-01', {
- 'resource_skus': '2017-09-01',
- 'disks': '2017-03-30',
- 'snapshots': '2017-03-30'
- }),
- 'ManagementLinkClient': '2016-09-01',
- 'ManagementLockClient': '2016-09-01',
- 'PolicyClient': '2016-12-01',
- 'ResourceManagementClient': '2018-05-01',
- 'SubscriptionClient': '2016-06-01',
- 'DnsManagementClient': '2016-04-01',
- 'KeyVaultManagementClient': '2016-10-01',
- 'AuthorizationManagementClient': SDKProfile('2015-07-01', {
- 'classic_administrators': '2015-06-01',
- 'policy_assignments': '2016-12-01',
- 'policy_definitions': '2016-12-01'
- }),
- 'KeyVaultClient': '2016-10-01',
- 'azure.multiapi.storage': '2017-11-09',
- 'azure.multiapi.cosmosdb': '2017-04-17'
- },
- '2018-03-01-hybrid': {
- 'StorageManagementClient': '2016-01-01',
- 'NetworkManagementClient': '2017-10-01',
- 'ComputeManagementClient': SDKProfile('2017-03-30'),
- 'ManagementLinkClient': '2016-09-01',
- 'ManagementLockClient': '2016-09-01',
- 'PolicyClient': '2016-12-01',
- 'ResourceManagementClient': '2018-02-01',
- 'SubscriptionClient': '2016-06-01',
- 'DnsManagementClient': '2016-04-01',
- 'KeyVaultManagementClient': '2016-10-01',
- 'AuthorizationManagementClient': SDKProfile('2015-07-01', {
- 'classic_administrators': '2015-06-01'
- }),
- 'KeyVaultClient': '2016-10-01',
- 'azure.multiapi.storage': '2017-04-17',
- 'azure.multiapi.cosmosdb': '2017-04-17'
- },
- '2017-03-09-profile': {
- 'StorageManagementClient': '2016-01-01',
- 'NetworkManagementClient': '2015-06-15',
- 'ComputeManagementClient': SDKProfile('2016-03-30'),
- 'ManagementLinkClient': '2016-09-01',
- 'ManagementLockClient': '2015-01-01',
- 'PolicyClient': '2015-10-01-preview',
- 'ResourceManagementClient': '2016-02-01',
- 'SubscriptionClient': '2016-06-01',
- 'DnsManagementClient': '2016-04-01',
- 'KeyVaultManagementClient': '2016-10-01',
- 'AuthorizationManagementClient': SDKProfile('2015-07-01', {
- 'classic_administrators': '2015-06-01'
- }),
- 'KeyVaultClient': '2016-10-01',
- 'azure.multiapi.storage': '2015-04-05'
- }
-}
-
-AZURE_TAG_ARGS = dict(
- tags=dict(type='dict'),
- append_tags=dict(type='bool', default=True),
-)
-
-AZURE_COMMON_REQUIRED_IF = [
- ('log_mode', 'file', ['log_path'])
-]
-
-ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION)
-CLOUDSHELL_USER_AGENT_KEY = 'AZURE_HTTP_USER_AGENT'
-VSCODEEXT_USER_AGENT_KEY = 'VSCODEEXT_USER_AGENT'
-
-CIDR_PATTERN = re.compile(r"(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1"
- r"[0-9]{2}|2[0-4][0-9]|25[0-5])(/([0-9]|[1-2][0-9]|3[0-2]))")
-
-AZURE_SUCCESS_STATE = "Succeeded"
-AZURE_FAILED_STATE = "Failed"
-
-HAS_AZURE = True
-HAS_AZURE_EXC = None
-HAS_AZURE_CLI_CORE = True
-HAS_AZURE_CLI_CORE_EXC = None
-
-HAS_MSRESTAZURE = True
-HAS_MSRESTAZURE_EXC = None
-
-try:
- import importlib
-except ImportError:
- # This passes the sanity import test, but does not provide a user friendly error message.
- # Doing so would require catching Exception for all imports of Azure dependencies in modules and module_utils.
- importlib = None
-
-try:
- from packaging.version import Version
- HAS_PACKAGING_VERSION = True
- HAS_PACKAGING_VERSION_EXC = None
-except ImportError:
- Version = None
- HAS_PACKAGING_VERSION = False
- HAS_PACKAGING_VERSION_EXC = traceback.format_exc()
-
- # NB: packaging issues sometimes cause msrestazure not to be installed; check it separately
-try:
- from msrest.serialization import Serializer
-except ImportError:
- HAS_MSRESTAZURE_EXC = traceback.format_exc()
- HAS_MSRESTAZURE = False
-
-try:
- from enum import Enum
- from msrestazure.azure_active_directory import AADTokenCredentials
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.azure_active_directory import MSIAuthentication
- from msrestazure.tools import parse_resource_id, resource_id, is_valid_resource_id
- from msrestazure import azure_cloud
- from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials
- from azure.mgmt.monitor.version import VERSION as monitor_client_version
- from azure.mgmt.network.version import VERSION as network_client_version
- from azure.mgmt.storage.version import VERSION as storage_client_version
- from azure.mgmt.compute.version import VERSION as compute_client_version
- from azure.mgmt.resource.version import VERSION as resource_client_version
- from azure.mgmt.dns.version import VERSION as dns_client_version
- from azure.mgmt.web.version import VERSION as web_client_version
- from azure.mgmt.network import NetworkManagementClient
- from azure.mgmt.resource.resources import ResourceManagementClient
- from azure.mgmt.resource.subscriptions import SubscriptionClient
- from azure.mgmt.storage import StorageManagementClient
- from azure.mgmt.compute import ComputeManagementClient
- from azure.mgmt.dns import DnsManagementClient
- from azure.mgmt.monitor import MonitorManagementClient
- from azure.mgmt.web import WebSiteManagementClient
- from azure.mgmt.containerservice import ContainerServiceClient
- from azure.mgmt.marketplaceordering import MarketplaceOrderingAgreements
- from azure.mgmt.trafficmanager import TrafficManagerManagementClient
- from azure.storage.cloudstorageaccount import CloudStorageAccount
- from azure.storage.blob import PageBlobService, BlockBlobService
- from adal.authentication_context import AuthenticationContext
- from azure.mgmt.sql import SqlManagementClient
- from azure.mgmt.servicebus import ServiceBusManagementClient
- import azure.mgmt.servicebus.models as ServicebusModel
- from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
- from azure.mgmt.rdbms.mysql import MySQLManagementClient
- from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
- from azure.mgmt.containerregistry import ContainerRegistryManagementClient
- from azure.mgmt.containerinstance import ContainerInstanceManagementClient
- from azure.mgmt.loganalytics import LogAnalyticsManagementClient
- import azure.mgmt.loganalytics.models as LogAnalyticsModels
- from azure.mgmt.automation import AutomationClient
- import azure.mgmt.automation.models as AutomationModel
- from azure.mgmt.iothub import IotHubClient
- from azure.mgmt.iothub import models as IoTHubModels
- from msrest.service_client import ServiceClient
- from msrestazure import AzureConfiguration
- from msrest.authentication import Authentication
- from azure.mgmt.resource.locks import ManagementLockClient
-except ImportError as exc:
- Authentication = object
- HAS_AZURE_EXC = traceback.format_exc()
- HAS_AZURE = False
-
-from base64 import b64encode, b64decode
-from hashlib import sha256
-from hmac import HMAC
-from time import time
-
-try:
- from urllib import (urlencode, quote_plus)
-except ImportError:
- from urllib.parse import (urlencode, quote_plus)
-
-try:
- from azure.cli.core.util import CLIError
- from azure.common.credentials import get_azure_cli_credentials, get_cli_profile
- from azure.common.cloud import get_cli_active_cloud
-except ImportError:
- HAS_AZURE_CLI_CORE = False
- HAS_AZURE_CLI_CORE_EXC = None
- CLIError = Exception
-
-
-def azure_id_to_dict(id):
- pieces = re.sub(r'^\/', '', id).split('/')
- result = {}
- index = 0
- while index < len(pieces) - 1:
- result[pieces[index]] = pieces[index + 1]
- index += 1
- return result
-
-
-def format_resource_id(val, subscription_id, namespace, types, resource_group):
- return resource_id(name=val,
- resource_group=resource_group,
- namespace=namespace,
- type=types,
- subscription=subscription_id) if not is_valid_resource_id(val) else val
-
-
-def normalize_location_name(name):
- return name.replace(' ', '').lower()
-
-
-# FUTURE: either get this from the requirements file (if we can be sure it's always available at runtime)
-# or generate the requirements files from this so we only have one source of truth to maintain...
-AZURE_PKG_VERSIONS = {
- 'StorageManagementClient': {
- 'package_name': 'storage',
- 'expected_version': '3.1.0'
- },
- 'ComputeManagementClient': {
- 'package_name': 'compute',
- 'expected_version': '4.4.0'
- },
- 'ContainerInstanceManagementClient': {
- 'package_name': 'containerinstance',
- 'expected_version': '0.4.0'
- },
- 'NetworkManagementClient': {
- 'package_name': 'network',
- 'expected_version': '2.3.0'
- },
- 'ResourceManagementClient': {
- 'package_name': 'resource',
- 'expected_version': '2.1.0'
- },
- 'DnsManagementClient': {
- 'package_name': 'dns',
- 'expected_version': '2.1.0'
- },
- 'WebSiteManagementClient': {
- 'package_name': 'web',
- 'expected_version': '0.41.0'
- },
- 'TrafficManagerManagementClient': {
- 'package_name': 'trafficmanager',
- 'expected_version': '0.50.0'
- },
-} if HAS_AZURE else {}
-
-
-AZURE_MIN_RELEASE = '2.0.0'
-
-
-class AzureRMModuleBase(object):
- def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False,
- mutually_exclusive=None, required_together=None,
- required_one_of=None, add_file_common_args=False, supports_check_mode=False,
- required_if=None, supports_tags=True, facts_module=False, skip_exec=False):
-
- merged_arg_spec = dict()
- merged_arg_spec.update(AZURE_COMMON_ARGS)
- if supports_tags:
- merged_arg_spec.update(AZURE_TAG_ARGS)
-
- if derived_arg_spec:
- merged_arg_spec.update(derived_arg_spec)
-
- merged_required_if = list(AZURE_COMMON_REQUIRED_IF)
- if required_if:
- merged_required_if += required_if
-
- self.module = AnsibleModule(argument_spec=merged_arg_spec,
- bypass_checks=bypass_checks,
- no_log=no_log,
- mutually_exclusive=mutually_exclusive,
- required_together=required_together,
- required_one_of=required_one_of,
- add_file_common_args=add_file_common_args,
- supports_check_mode=supports_check_mode,
- required_if=merged_required_if)
-
- if not HAS_PACKAGING_VERSION:
- self.fail(msg=missing_required_lib('packaging'),
- exception=HAS_PACKAGING_VERSION_EXC)
-
- if not HAS_MSRESTAZURE:
- self.fail(msg=missing_required_lib('msrestazure'),
- exception=HAS_MSRESTAZURE_EXC)
-
- if not HAS_AZURE:
- self.fail(msg=missing_required_lib('ansible[azure] (azure >= {0})'.format(AZURE_MIN_RELEASE)),
- exception=HAS_AZURE_EXC)
-
- self._network_client = None
- self._storage_client = None
- self._resource_client = None
- self._compute_client = None
- self._dns_client = None
- self._web_client = None
- self._marketplace_client = None
- self._sql_client = None
- self._mysql_client = None
- self._mariadb_client = None
- self._postgresql_client = None
- self._containerregistry_client = None
- self._containerinstance_client = None
- self._containerservice_client = None
- self._managedcluster_client = None
- self._traffic_manager_management_client = None
- self._monitor_client = None
- self._resource = None
- self._log_analytics_client = None
- self._servicebus_client = None
- self._automation_client = None
- self._IoThub_client = None
- self._lock_client = None
-
- self.check_mode = self.module.check_mode
- self.api_profile = self.module.params.get('api_profile')
- self.facts_module = facts_module
- # self.debug = self.module.params.get('debug')
-
- # delegate auth to AzureRMAuth class (shared with all plugin types)
- self.azure_auth = AzureRMAuth(fail_impl=self.fail, **self.module.params)
-
- # common parameter validation
- if self.module.params.get('tags'):
- self.validate_tags(self.module.params['tags'])
-
- if not skip_exec:
- res = self.exec_module(**self.module.params)
- self.module.exit_json(**res)
-
- def check_client_version(self, client_type):
- # Ensure the installed Azure SDK client packages meet the minimum expected versions.
- package_version = AZURE_PKG_VERSIONS.get(client_type.__name__, None)
- if package_version is not None:
- client_name = package_version.get('package_name')
- try:
- client_module = importlib.import_module(client_type.__module__)
- client_version = client_module.VERSION
- except (RuntimeError, AttributeError):
- # can't get at the module version for some reason, just fail silently...
- return
- expected_version = package_version.get('expected_version')
- if Version(client_version) < Version(expected_version):
- self.fail("Installed azure-mgmt-{0} client version is {1}. The minimum supported version is {2}. Try "
- "`pip install ansible[azure]`".format(client_name, client_version, expected_version))
- if Version(client_version) != Version(expected_version):
- self.module.warn("Installed azure-mgmt-{0} client version is {1}. The expected version is {2}. Try "
- "`pip install ansible[azure]`".format(client_name, client_version, expected_version))
-
- def exec_module(self, **kwargs):
- self.fail("Error: {0} failed to implement exec_module method.".format(self.__class__.__name__))
-
- def fail(self, msg, **kwargs):
- '''
- Shortcut for calling module.fail()
-
- :param msg: Error message text.
- :param kwargs: Any key=value pairs
- :return: None
- '''
- self.module.fail_json(msg=msg, **kwargs)
-
- def deprecate(self, msg, version=None):
- self.module.deprecate(msg, version)
-
- def log(self, msg, pretty_print=False):
- if pretty_print:
- self.module.debug(json.dumps(msg, indent=4, sort_keys=True))
- else:
- self.module.debug(msg)
-
- def validate_tags(self, tags):
- '''
- Check if tags dictionary contains string:string pairs.
-
- :param tags: dictionary of string:string pairs
- :return: None
- '''
- if not self.facts_module:
- if not isinstance(tags, dict):
- self.fail("Tags must be a dictionary of string:string values.")
- for key, value in tags.items():
- if not isinstance(value, str):
- self.fail("Tags values must be strings. Found {0}:{1}".format(str(key), str(value)))
-
- def update_tags(self, tags):
- '''
- Call from the module to update metadata tags. Returns tuple
- with bool indicating if there was a change and dict of new
- tags to assign to the object.
-
- :param tags: metadata tags from the object
- :return: bool, dict
- '''
- tags = tags or dict()
- new_tags = copy.copy(tags) if isinstance(tags, dict) else dict()
- param_tags = self.module.params.get('tags') if isinstance(self.module.params.get('tags'), dict) else dict()
- append_tags = self.module.params.get('append_tags') if self.module.params.get('append_tags') is not None else True
- changed = False
- # check add or update
- for key, value in param_tags.items():
- if not new_tags.get(key) or new_tags[key] != value:
- changed = True
- new_tags[key] = value
- # check remove
- if not append_tags:
- for key, value in tags.items():
- if not param_tags.get(key):
- new_tags.pop(key)
- changed = True
- return changed, new_tags
-
- def has_tags(self, obj_tags, tag_list):
- '''
- Used in fact modules to compare an object's tags to a list of parameter tags. Return True if every tag in
- the parameter list exists in the object's tags.
-
- :param obj_tags: dictionary of tags from an Azure object.
- :param tag_list: list of tag keys or tag key:value pairs
- :return: bool
- '''
-
- if not obj_tags and tag_list:
- return False
-
- if not tag_list:
- return True
-
- matches = 0
- result = False
- for tag in tag_list:
- tag_key = tag
- tag_value = None
- if ':' in tag:
- tag_key, tag_value = tag.split(':')
- if tag_value and obj_tags.get(tag_key) == tag_value:
- matches += 1
- elif not tag_value and obj_tags.get(tag_key):
- matches += 1
- if matches == len(tag_list):
- result = True
- return result
-
- def get_resource_group(self, resource_group):
- '''
- Fetch a resource group.
-
- :param resource_group: name of a resource group
- :return: resource group object
- '''
- try:
- return self.rm_client.resource_groups.get(resource_group)
- except CloudError as cloud_error:
- self.fail("Error retrieving resource group {0} - {1}".format(resource_group, cloud_error.message))
- except Exception as exc:
- self.fail("Error retrieving resource group {0} - {1}".format(resource_group, str(exc)))
-
- def parse_resource_to_dict(self, resource):
- '''
- Return a dict of the given resource, which contains the name and resource group.
-
- :param resource: A resource name, ID, or a dict containing the name and resource group.
- '''
- resource_dict = parse_resource_id(resource) if not isinstance(resource, dict) else resource
- resource_dict['resource_group'] = resource_dict.get('resource_group', self.resource_group)
- resource_dict['subscription_id'] = resource_dict.get('subscription_id', self.subscription_id)
- return resource_dict
-
- def serialize_obj(self, obj, class_name, enum_modules=None):
- '''
- Return a JSON representation of an Azure object.
-
- :param obj: Azure object
- :param class_name: Name of the object's class
- :param enum_modules: List of module names to build enum dependencies from.
- :return: serialized result
- '''
- enum_modules = [] if enum_modules is None else enum_modules
-
- dependencies = dict()
- if enum_modules:
- for module_name in enum_modules:
- mod = importlib.import_module(module_name)
- for mod_class_name, mod_class_obj in inspect.getmembers(mod, predicate=inspect.isclass):
- dependencies[mod_class_name] = mod_class_obj
- self.log("dependencies: ")
- self.log(str(dependencies))
- serializer = Serializer(classes=dependencies)
- return serializer.body(obj, class_name, keep_readonly=True)
-
- def get_poller_result(self, poller, wait=5):
- '''
- Consistent method of waiting on and retrieving results from Azure's long poller
-
- :param poller: Azure poller object
- :return: object resulting from the original request
- '''
- try:
- delay = wait
- while not poller.done():
- self.log("Waiting for {0} sec".format(delay))
- poller.wait(timeout=delay)
- return poller.result()
- except Exception as exc:
- self.log(str(exc))
- raise
-
- def check_provisioning_state(self, azure_object, requested_state='present'):
- '''
- Check an Azure object's provisioning state. If something did not complete the provisioning
- process, then we cannot operate on it.
-
- :param azure_object: An object such as a subnet, storageaccount, etc. Must have provisioning_state
- and name attributes.
- :return: None
- '''
-
- if hasattr(azure_object, 'properties') and hasattr(azure_object.properties, 'provisioning_state') and \
- hasattr(azure_object, 'name'):
- # resource group object fits this model
- if isinstance(azure_object.properties.provisioning_state, Enum):
- if azure_object.properties.provisioning_state.value != AZURE_SUCCESS_STATE and \
- requested_state != 'absent':
- self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
- azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
- return
- if azure_object.properties.provisioning_state != AZURE_SUCCESS_STATE and \
- requested_state != 'absent':
- self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
- azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
- return
-
- if hasattr(azure_object, 'provisioning_state') or not hasattr(azure_object, 'name'):
- if isinstance(azure_object.provisioning_state, Enum):
- if azure_object.provisioning_state.value != AZURE_SUCCESS_STATE and requested_state != 'absent':
- self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
- azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
- return
- if azure_object.provisioning_state != AZURE_SUCCESS_STATE and requested_state != 'absent':
- self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
- azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
-
- def get_blob_client(self, resource_group_name, storage_account_name, storage_blob_type='block'):
- keys = dict()
- try:
- # Get keys from the storage account
- self.log('Getting keys')
- account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)
- except Exception as exc:
- self.fail("Error getting keys for account {0} - {1}".format(storage_account_name, str(exc)))
-
- try:
- self.log('Create blob service')
- if storage_blob_type == 'page':
- return PageBlobService(endpoint_suffix=self._cloud_environment.suffixes.storage_endpoint,
- account_name=storage_account_name,
- account_key=account_keys.keys[0].value)
- elif storage_blob_type == 'block':
- return BlockBlobService(endpoint_suffix=self._cloud_environment.suffixes.storage_endpoint,
- account_name=storage_account_name,
- account_key=account_keys.keys[0].value)
- else:
- raise Exception("Invalid storage blob type defined.")
- except Exception as exc:
- self.fail("Error creating blob service client for storage account {0} - {1}".format(storage_account_name,
- str(exc)))
-
- def create_default_pip(self, resource_group, location, public_ip_name, allocation_method='Dynamic', sku=None):
- '''
- Create a default public IP address <public_ip_name> to associate with a network interface.
- If a PIP address matching <public_ip_name> exists, return it. Otherwise, create one.
-
- :param resource_group: name of an existing resource group
- :param location: a valid azure location
- :param public_ip_name: base name to assign the public IP address
- :param allocation_method: one of 'Static' or 'Dynamic'
- :param sku: sku
- :return: PIP object
- '''
- pip = None
-
- self.log("Starting create_default_pip {0}".format(public_ip_name))
- self.log("Check to see if public IP {0} exists".format(public_ip_name))
- try:
- pip = self.network_client.public_ip_addresses.get(resource_group, public_ip_name)
- except CloudError:
- pass
-
- if pip:
- self.log("Public ip {0} found.".format(public_ip_name))
- self.check_provisioning_state(pip)
- return pip
-
- params = self.network_models.PublicIPAddress(
- location=location,
- public_ip_allocation_method=allocation_method,
- sku=sku
- )
- self.log('Creating default public IP {0}'.format(public_ip_name))
- try:
- poller = self.network_client.public_ip_addresses.create_or_update(resource_group, public_ip_name, params)
- except Exception as exc:
- self.fail("Error creating {0} - {1}".format(public_ip_name, str(exc)))
-
- return self.get_poller_result(poller)
-
- def create_default_securitygroup(self, resource_group, location, security_group_name, os_type, open_ports):
- '''
- Create a default security group <security_group_name> to associate with a network interface. If a security group matching
- <security_group_name> exists, return it. Otherwise, create one.
-
- :param resource_group: Resource group name
- :param location: azure location name
- :param security_group_name: base name to use for the security group
- :param os_type: one of 'Windows' or 'Linux'. Determines any default rules added to the security group.
- :param open_ports: list of ports to open. When not provided, default rules allowing SSH access (Linux)
- or RDP and WinRM access (Windows) are added.
- :return: security_group object
- '''
- group = None
-
- self.log("Create security group {0}".format(security_group_name))
- self.log("Check to see if security group {0} exists".format(security_group_name))
- try:
- group = self.network_client.network_security_groups.get(resource_group, security_group_name)
- except CloudError:
- pass
-
- if group:
- self.log("Security group {0} found.".format(security_group_name))
- self.check_provisioning_state(group)
- return group
-
- parameters = self.network_models.NetworkSecurityGroup()
- parameters.location = location
-
- if not open_ports:
- # Open default ports based on OS type
- if os_type == 'Linux':
- # add an inbound SSH rule
- parameters.security_rules = [
- self.network_models.SecurityRule(protocol='Tcp',
- source_address_prefix='*',
- destination_address_prefix='*',
- access='Allow',
- direction='Inbound',
- description='Allow SSH Access',
- source_port_range='*',
- destination_port_range='22',
- priority=100,
- name='SSH')
- ]
- parameters.location = location
- else:
- # for windows add inbound RDP and WinRM rules
- parameters.security_rules = [
- self.network_models.SecurityRule(protocol='Tcp',
- source_address_prefix='*',
- destination_address_prefix='*',
- access='Allow',
- direction='Inbound',
- description='Allow RDP port 3389',
- source_port_range='*',
- destination_port_range='3389',
- priority=100,
- name='RDP01'),
- self.network_models.SecurityRule(protocol='Tcp',
- source_address_prefix='*',
- destination_address_prefix='*',
- access='Allow',
- direction='Inbound',
- description='Allow WinRM HTTPS port 5986',
- source_port_range='*',
- destination_port_range='5986',
- priority=101,
- name='WinRM01'),
- ]
- else:
- # Open custom ports
- parameters.security_rules = []
- priority = 100
- for port in open_ports:
- priority += 1
- rule_name = "Rule_{0}".format(priority)
- parameters.security_rules.append(
- self.network_models.SecurityRule(protocol='Tcp',
- source_address_prefix='*',
- destination_address_prefix='*',
- access='Allow',
- direction='Inbound',
- source_port_range='*',
- destination_port_range=str(port),
- priority=priority,
- name=rule_name)
- )
-
- self.log('Creating default security group {0}'.format(security_group_name))
- try:
- poller = self.network_client.network_security_groups.create_or_update(resource_group,
- security_group_name,
- parameters)
- except Exception as exc:
- self.fail("Error creating default security rule {0} - {1}".format(security_group_name, str(exc)))
-
- return self.get_poller_result(poller)
-
- @staticmethod
- def _validation_ignore_callback(session, global_config, local_config, **kwargs):
- session.verify = False
-
- def get_api_profile(self, client_type_name, api_profile_name):
- profile_all_clients = AZURE_API_PROFILES.get(api_profile_name)
-
- if not profile_all_clients:
- raise KeyError("unknown Azure API profile: {0}".format(api_profile_name))
-
- profile_raw = profile_all_clients.get(client_type_name, None)
-
- if not profile_raw:
- self.module.warn("Azure API profile {0} does not define an entry for {1}".format(api_profile_name, client_type_name))
-
- if isinstance(profile_raw, dict):
- if not profile_raw.get('default_api_version'):
- raise KeyError("Azure API profile {0} does not define 'default_api_version'".format(api_profile_name))
- return profile_raw
-
- # wrap basic strings in a dict that just defines the default
- return dict(default_api_version=profile_raw)
-
- def get_mgmt_svc_client(self, client_type, base_url=None, api_version=None):
- self.log('Getting management service client {0}'.format(client_type.__name__))
- self.check_client_version(client_type)
-
- client_argspec = inspect.getargspec(client_type.__init__)
-
- if not base_url:
- # most things are resource_manager, don't make everyone specify
- base_url = self.azure_auth._cloud_environment.endpoints.resource_manager
-
- client_kwargs = dict(credentials=self.azure_auth.azure_credentials, subscription_id=self.azure_auth.subscription_id, base_url=base_url)
-
- api_profile_dict = {}
-
- if self.api_profile:
- api_profile_dict = self.get_api_profile(client_type.__name__, self.api_profile)
-
- # unversioned clients won't accept profile; only send it if necessary
- # clients without a version specified in the profile will use the default
- if api_profile_dict and 'profile' in client_argspec.args:
- client_kwargs['profile'] = api_profile_dict
-
- # If the client doesn't accept api_version, it's unversioned.
- # If it does, favor explicitly-specified api_version, fall back to api_profile
- if 'api_version' in client_argspec.args:
- profile_default_version = api_profile_dict.get('default_api_version', None)
- if api_version or profile_default_version:
- client_kwargs['api_version'] = api_version or profile_default_version
- if 'profile' in client_kwargs:
- # remove profile; only pass API version if specified
- client_kwargs.pop('profile')
-
- client = client_type(**client_kwargs)
-
- # FUTURE: remove this once everything exposes models directly (eg, containerinstance)
- try:
- getattr(client, "models")
- except AttributeError:
- def _ansible_get_models(self, *arg, **kwarg):
- return self._ansible_models
-
- setattr(client, '_ansible_models', importlib.import_module(client_type.__module__).models)
- client.models = types.MethodType(_ansible_get_models, client)
-
- client.config = self.add_user_agent(client.config)
-
- if self.azure_auth._cert_validation_mode == 'ignore':
- client.config.session_configuration_callback = self._validation_ignore_callback
-
- return client
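
get_mgmt_svc_client inspects the client's __init__ signature to decide whether to pass a profile, an api_version, or neither. A standalone sketch of just that decision, using a dummy client class; inspect.getfullargspec stands in for the inspect.getargspec call above, which is unavailable on newer Python releases:

import inspect

class DummyVersionedClient(object):
    # Stand-in for an SDK client; only the constructor signature matters for this sketch.
    def __init__(self, credentials, subscription_id, base_url=None, api_version=None, profile=None):
        self.api_version = api_version
        self.profile = profile

client_argspec = inspect.getfullargspec(DummyVersionedClient.__init__)
client_kwargs = dict(credentials=object(), subscription_id='sub-id', base_url='https://management.azure.com')

api_profile_dict = {'default_api_version': '2018-07-01'}  # hypothetical profile entry
api_version = None                                        # no explicit override from the module

if api_profile_dict and 'profile' in client_argspec.args:
    client_kwargs['profile'] = api_profile_dict
if 'api_version' in client_argspec.args:
    profile_default_version = api_profile_dict.get('default_api_version')
    if api_version or profile_default_version:
        client_kwargs['api_version'] = api_version or profile_default_version
        client_kwargs.pop('profile', None)  # a concrete api_version wins over the profile

client = DummyVersionedClient(**client_kwargs)
print(client.api_version)  # 2018-07-01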
-
- def add_user_agent(self, config):
- # Add user agent for Ansible
- config.add_user_agent(ANSIBLE_USER_AGENT)
- # Add user agent when running from Cloud Shell
- if CLOUDSHELL_USER_AGENT_KEY in os.environ:
- config.add_user_agent(os.environ[CLOUDSHELL_USER_AGENT_KEY])
- # Add user agent when running from VSCode extension
- if VSCODEEXT_USER_AGENT_KEY in os.environ:
- config.add_user_agent(os.environ[VSCODEEXT_USER_AGENT_KEY])
- return config
-
-    def generate_sas_token(self, **kwargs):
-        base_url = kwargs.get('base_url', None)
-        expiry = kwargs.get('expiry', time() + 3600)
-        key = kwargs.get('key', None)
-        policy = kwargs.get('policy', None)
- url = quote_plus(base_url)
- ttl = int(expiry)
- sign_key = '{0}\n{1}'.format(url, ttl)
- signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest())
- result = {
- 'sr': url,
- 'sig': signature,
- 'se': str(ttl),
- }
- if policy:
- result['skn'] = policy
- return 'SharedAccessSignature ' + urlencode(result)
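
The token above signs "quote_plus(base_url)\n<expiry>" with HMAC-SHA256 over the base64-decoded key. A standalone rendering of the same computation with placeholder values (the namespace URL, key and policy below are made up; the signature is decoded to text here so urlencode emits a clean value):

from base64 import b64decode, b64encode
from hashlib import sha256
from hmac import HMAC
from time import time
from urllib.parse import quote_plus, urlencode  # Python 3 imports used for this standalone sketch

base_url = 'myns.servicebus.windows.net/myqueue'            # placeholder resource URI
key = b64encode(b'placeholder-shared-access-key').decode()  # placeholder key, base64 encoded
policy = 'RootManageSharedAccessKey'                        # placeholder policy name

url = quote_plus(base_url)
ttl = int(time() + 3600)
sign_key = '{0}\n{1}'.format(url, ttl)
signature = b64encode(HMAC(b64decode(key), sign_key.encode('utf-8'), sha256).digest()).decode('utf-8')
print('SharedAccessSignature ' + urlencode({'sr': url, 'sig': signature, 'se': str(ttl), 'skn': policy}))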
-
-    def get_data_svc_client(self, **kwargs):
-        url = kwargs.get('base_url', None)
-        config = AzureConfiguration(base_url='https://{0}'.format(url))
-        config.credentials = AzureSASAuthentication(token=self.generate_sas_token(**kwargs))
- config = self.add_user_agent(config)
- return ServiceClient(creds=config.credentials, config=config)
-
- # passthru methods to AzureAuth instance for backcompat
- @property
- def credentials(self):
- return self.azure_auth.credentials
-
- @property
- def _cloud_environment(self):
- return self.azure_auth._cloud_environment
-
- @property
- def subscription_id(self):
- return self.azure_auth.subscription_id
-
- @property
- def storage_client(self):
- self.log('Getting storage client...')
- if not self._storage_client:
- self._storage_client = self.get_mgmt_svc_client(StorageManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2018-07-01')
- return self._storage_client
-
- @property
- def storage_models(self):
- return StorageManagementClient.models("2018-07-01")
-
- @property
- def network_client(self):
- self.log('Getting network client')
- if not self._network_client:
- self._network_client = self.get_mgmt_svc_client(NetworkManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2019-06-01')
- return self._network_client
-
- @property
- def network_models(self):
- self.log("Getting network models...")
- return NetworkManagementClient.models("2018-08-01")
-
- @property
- def rm_client(self):
- self.log('Getting resource manager client')
- if not self._resource_client:
- self._resource_client = self.get_mgmt_svc_client(ResourceManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2017-05-10')
- return self._resource_client
-
- @property
- def rm_models(self):
- self.log("Getting resource manager models")
- return ResourceManagementClient.models("2017-05-10")
-
- @property
- def compute_client(self):
- self.log('Getting compute client')
- if not self._compute_client:
- self._compute_client = self.get_mgmt_svc_client(ComputeManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2019-07-01')
- return self._compute_client
-
- @property
- def compute_models(self):
- self.log("Getting compute models")
- return ComputeManagementClient.models("2019-07-01")
-
- @property
- def dns_client(self):
- self.log('Getting dns client')
- if not self._dns_client:
- self._dns_client = self.get_mgmt_svc_client(DnsManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2018-05-01')
- return self._dns_client
-
- @property
- def dns_models(self):
- self.log("Getting dns models...")
- return DnsManagementClient.models('2018-05-01')
-
- @property
- def web_client(self):
- self.log('Getting web client')
- if not self._web_client:
- self._web_client = self.get_mgmt_svc_client(WebSiteManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2018-02-01')
- return self._web_client
-
- @property
- def containerservice_client(self):
- self.log('Getting container service client')
- if not self._containerservice_client:
- self._containerservice_client = self.get_mgmt_svc_client(ContainerServiceClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2017-07-01')
- return self._containerservice_client
-
- @property
- def managedcluster_models(self):
- self.log("Getting container service models")
- return ContainerServiceClient.models('2018-03-31')
-
- @property
- def managedcluster_client(self):
- self.log('Getting container service client')
- if not self._managedcluster_client:
- self._managedcluster_client = self.get_mgmt_svc_client(ContainerServiceClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2018-03-31')
- return self._managedcluster_client
-
- @property
- def sql_client(self):
- self.log('Getting SQL client')
- if not self._sql_client:
- self._sql_client = self.get_mgmt_svc_client(SqlManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
- return self._sql_client
-
- @property
- def postgresql_client(self):
- self.log('Getting PostgreSQL client')
- if not self._postgresql_client:
- self._postgresql_client = self.get_mgmt_svc_client(PostgreSQLManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
- return self._postgresql_client
-
- @property
- def mysql_client(self):
- self.log('Getting MySQL client')
- if not self._mysql_client:
- self._mysql_client = self.get_mgmt_svc_client(MySQLManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
- return self._mysql_client
-
- @property
- def mariadb_client(self):
- self.log('Getting MariaDB client')
- if not self._mariadb_client:
- self._mariadb_client = self.get_mgmt_svc_client(MariaDBManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
- return self._mariadb_client
-
- @property
- def containerregistry_client(self):
- self.log('Getting container registry mgmt client')
- if not self._containerregistry_client:
- self._containerregistry_client = self.get_mgmt_svc_client(ContainerRegistryManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2017-10-01')
-
- return self._containerregistry_client
-
- @property
- def containerinstance_client(self):
- self.log('Getting container instance mgmt client')
- if not self._containerinstance_client:
- self._containerinstance_client = self.get_mgmt_svc_client(ContainerInstanceManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2018-06-01')
-
- return self._containerinstance_client
-
- @property
- def marketplace_client(self):
- self.log('Getting marketplace agreement client')
- if not self._marketplace_client:
- self._marketplace_client = self.get_mgmt_svc_client(MarketplaceOrderingAgreements,
- base_url=self._cloud_environment.endpoints.resource_manager)
- return self._marketplace_client
-
- @property
- def traffic_manager_management_client(self):
- self.log('Getting traffic manager client')
- if not self._traffic_manager_management_client:
- self._traffic_manager_management_client = self.get_mgmt_svc_client(TrafficManagerManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
- return self._traffic_manager_management_client
-
- @property
- def monitor_client(self):
- self.log('Getting monitor client')
- if not self._monitor_client:
- self._monitor_client = self.get_mgmt_svc_client(MonitorManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
- return self._monitor_client
-
- @property
- def log_analytics_client(self):
- self.log('Getting log analytics client')
- if not self._log_analytics_client:
- self._log_analytics_client = self.get_mgmt_svc_client(LogAnalyticsManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
- return self._log_analytics_client
-
- @property
- def log_analytics_models(self):
- self.log('Getting log analytics models')
- return LogAnalyticsModels
-
- @property
- def servicebus_client(self):
- self.log('Getting servicebus client')
- if not self._servicebus_client:
- self._servicebus_client = self.get_mgmt_svc_client(ServiceBusManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
- return self._servicebus_client
-
- @property
- def servicebus_models(self):
- return ServicebusModel
-
- @property
- def automation_client(self):
- self.log('Getting automation client')
- if not self._automation_client:
- self._automation_client = self.get_mgmt_svc_client(AutomationClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
- return self._automation_client
-
- @property
- def automation_models(self):
- return AutomationModel
-
- @property
- def IoThub_client(self):
- self.log('Getting iothub client')
- if not self._IoThub_client:
- self._IoThub_client = self.get_mgmt_svc_client(IotHubClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
- return self._IoThub_client
-
- @property
- def IoThub_models(self):
- return IoTHubModels
-
- @property
- def lock_client(self):
- self.log('Getting lock client')
- if not self._lock_client:
- self._lock_client = self.get_mgmt_svc_client(ManagementLockClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2016-09-01')
- return self._lock_client
-
- @property
- def lock_models(self):
- self.log("Getting lock models")
- return ManagementLockClient.models('2016-09-01')
-
-
-class AzureSASAuthentication(Authentication):
- """Simple SAS Authentication.
- An implementation of Authentication in
- https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/authentication.py
-
- :param str token: SAS token
- """
- def __init__(self, token):
- self.token = token
-
- def signed_session(self):
- session = super(AzureSASAuthentication, self).signed_session()
- session.headers['Authorization'] = self.token
- return session
-
-
-class AzureRMAuthException(Exception):
- pass
-
-
-class AzureRMAuth(object):
- def __init__(self, auth_source='auto', profile=None, subscription_id=None, client_id=None, secret=None,
- tenant=None, ad_user=None, password=None, cloud_environment='AzureCloud', cert_validation_mode='validate',
- api_profile='latest', adfs_authority_url=None, fail_impl=None, **kwargs):
-
- if fail_impl:
- self._fail_impl = fail_impl
- else:
- self._fail_impl = self._default_fail_impl
-
- self._cloud_environment = None
- self._adfs_authority_url = None
-
- # authenticate
- self.credentials = self._get_credentials(
- dict(auth_source=auth_source, profile=profile, subscription_id=subscription_id, client_id=client_id, secret=secret,
- tenant=tenant, ad_user=ad_user, password=password, cloud_environment=cloud_environment,
- cert_validation_mode=cert_validation_mode, api_profile=api_profile, adfs_authority_url=adfs_authority_url))
-
- if not self.credentials:
- if HAS_AZURE_CLI_CORE:
- self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
- "define a profile in ~/.azure/credentials, or log in with Azure CLI (`az login`).")
- else:
- self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
- "define a profile in ~/.azure/credentials, or install Azure CLI and log in (`az login`).")
-
- # cert validation mode precedence: module-arg, credential profile, env, "validate"
- self._cert_validation_mode = cert_validation_mode or self.credentials.get('cert_validation_mode') or \
- os.environ.get('AZURE_CERT_VALIDATION_MODE') or 'validate'
-
- if self._cert_validation_mode not in ['validate', 'ignore']:
- self.fail('invalid cert_validation_mode: {0}'.format(self._cert_validation_mode))
-
- # if cloud_environment specified, look up/build Cloud object
- raw_cloud_env = self.credentials.get('cloud_environment')
- if self.credentials.get('credentials') is not None and raw_cloud_env is not None:
- self._cloud_environment = raw_cloud_env
- elif not raw_cloud_env:
- self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default
- else:
- # try to look up "well-known" values via the name attribute on azure_cloud members
- all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)]
- matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env]
- if len(matched_clouds) == 1:
- self._cloud_environment = matched_clouds[0]
- elif len(matched_clouds) > 1:
- self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env))
- else:
- if not urlparse.urlparse(raw_cloud_env).scheme:
- self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds]))
- try:
- self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env)
- except Exception as e:
- self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message), exception=traceback.format_exc())
-
- if self.credentials.get('subscription_id', None) is None and self.credentials.get('credentials') is None:
- self.fail("Credentials did not include a subscription_id value.")
- self.log("setting subscription_id")
- self.subscription_id = self.credentials['subscription_id']
-
- # get authentication authority
- # for adfs, user could pass in authority or not.
- # for others, use default authority from cloud environment
- if self.credentials.get('adfs_authority_url') is None:
- self._adfs_authority_url = self._cloud_environment.endpoints.active_directory
- else:
- self._adfs_authority_url = self.credentials.get('adfs_authority_url')
-
- # get resource from cloud environment
- self._resource = self._cloud_environment.endpoints.active_directory_resource_id
-
- if self.credentials.get('credentials') is not None:
- # AzureCLI credentials
- self.azure_credentials = self.credentials['credentials']
- elif self.credentials.get('client_id') is not None and \
- self.credentials.get('secret') is not None and \
- self.credentials.get('tenant') is not None:
- self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
- secret=self.credentials['secret'],
- tenant=self.credentials['tenant'],
- cloud_environment=self._cloud_environment,
- verify=self._cert_validation_mode == 'validate')
-
- elif self.credentials.get('ad_user') is not None and \
- self.credentials.get('password') is not None and \
- self.credentials.get('client_id') is not None and \
- self.credentials.get('tenant') is not None:
-
- self.azure_credentials = self.acquire_token_with_username_password(
- self._adfs_authority_url,
- self._resource,
- self.credentials['ad_user'],
- self.credentials['password'],
- self.credentials['client_id'],
- self.credentials['tenant'])
-
- elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
- tenant = self.credentials.get('tenant')
- if not tenant:
- tenant = 'common' # SDK default
-
- self.azure_credentials = UserPassCredentials(self.credentials['ad_user'],
- self.credentials['password'],
- tenant=tenant,
- cloud_environment=self._cloud_environment,
- verify=self._cert_validation_mode == 'validate')
- else:
- self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
- "Credentials must include client_id, secret and tenant or ad_user and password, or "
- "ad_user, password, client_id, tenant and adfs_authority_url(optional) for ADFS authentication, or "
- "be logged in using AzureCLI.")
-
- def fail(self, msg, exception=None, **kwargs):
- self._fail_impl(msg)
-
- def _default_fail_impl(self, msg, exception=None, **kwargs):
- raise AzureRMAuthException(msg)
-
- def _get_profile(self, profile="default"):
- path = expanduser("~/.azure/credentials")
- try:
- config = configparser.ConfigParser()
- config.read(path)
- except Exception as exc:
- self.fail("Failed to access {0}. Check that the file exists and you have read "
- "access. {1}".format(path, str(exc)))
- credentials = dict()
- for key in AZURE_CREDENTIAL_ENV_MAPPING:
- try:
- credentials[key] = config.get(profile, key, raw=True)
- except Exception:
- pass
-
- if credentials.get('subscription_id'):
- return credentials
-
- return None
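
_get_profile reads an INI-style ~/.azure/credentials file whose section name is the profile. A minimal sketch of the expected layout, parsed here from an in-memory string instead of the real file; every value is a placeholder and the keys shown are a common subset of AZURE_CREDENTIAL_ENV_MAPPING:

import configparser  # stdlib (Python 3) import used for this standalone sketch

sample = """
[default]
subscription_id=00000000-0000-0000-0000-000000000000
client_id=11111111-1111-1111-1111-111111111111
secret=placeholder-service-principal-secret
tenant=22222222-2222-2222-2222-222222222222
"""

config = configparser.ConfigParser()
config.read_string(sample)
credentials = dict((key, config.get('default', key, raw=True))
                   for key in ('subscription_id', 'client_id', 'secret', 'tenant'))
print(credentials['subscription_id'])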
-
- def _get_msi_credentials(self, subscription_id_param=None, **kwargs):
- client_id = kwargs.get('client_id', None)
- credentials = MSIAuthentication(client_id=client_id)
- subscription_id = subscription_id_param or os.environ.get(AZURE_CREDENTIAL_ENV_MAPPING['subscription_id'], None)
- if not subscription_id:
- try:
- # use the first subscription of the MSI
- subscription_client = SubscriptionClient(credentials)
- subscription = next(subscription_client.subscriptions.list())
- subscription_id = str(subscription.subscription_id)
- except Exception as exc:
- self.fail("Failed to get MSI token: {0}. "
- "Please check whether your machine enabled MSI or grant access to any subscription.".format(str(exc)))
- return {
- 'credentials': credentials,
- 'subscription_id': subscription_id
- }
-
- def _get_azure_cli_credentials(self):
- credentials, subscription_id = get_azure_cli_credentials()
- cloud_environment = get_cli_active_cloud()
-
- cli_credentials = {
- 'credentials': credentials,
- 'subscription_id': subscription_id,
- 'cloud_environment': cloud_environment
- }
- return cli_credentials
-
- def _get_env_credentials(self):
- env_credentials = dict()
- for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
- env_credentials[attribute] = os.environ.get(env_variable, None)
-
- if env_credentials['profile']:
- credentials = self._get_profile(env_credentials['profile'])
- return credentials
-
- if env_credentials.get('subscription_id') is not None:
- return env_credentials
-
- return None
-
- # TODO: use explicit kwargs instead of intermediate dict
- def _get_credentials(self, params):
- # Get authentication credentials.
- self.log('Getting credentials')
-
- arg_credentials = dict()
- for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
- arg_credentials[attribute] = params.get(attribute, None)
-
- auth_source = params.get('auth_source', None)
- if not auth_source:
- auth_source = os.environ.get('ANSIBLE_AZURE_AUTH_SOURCE', 'auto')
-
- if auth_source == 'msi':
-            self.log('Retrieving credentials from MSI')
- return self._get_msi_credentials(arg_credentials['subscription_id'], client_id=params.get('client_id', None))
-
- if auth_source == 'cli':
- if not HAS_AZURE_CLI_CORE:
- self.fail(msg=missing_required_lib('azure-cli', reason='for `cli` auth_source'),
- exception=HAS_AZURE_CLI_CORE_EXC)
- try:
- self.log('Retrieving credentials from Azure CLI profile')
- cli_credentials = self._get_azure_cli_credentials()
- return cli_credentials
- except CLIError as err:
- self.fail("Azure CLI profile cannot be loaded - {0}".format(err))
-
- if auth_source == 'env':
- self.log('Retrieving credentials from environment')
- env_credentials = self._get_env_credentials()
- return env_credentials
-
- if auth_source == 'credential_file':
- self.log("Retrieving credentials from credential file")
- profile = params.get('profile') or 'default'
- default_credentials = self._get_profile(profile)
- return default_credentials
-
-        # auto, precedence: module parameters -> environment variables -> default profile in ~/.azure/credentials -> Azure CLI
- # try module params
- if arg_credentials['profile'] is not None:
- self.log('Retrieving credentials with profile parameter.')
- credentials = self._get_profile(arg_credentials['profile'])
- return credentials
-
- if arg_credentials['subscription_id']:
- self.log('Received credentials from parameters.')
- return arg_credentials
-
- # try environment
- env_credentials = self._get_env_credentials()
- if env_credentials:
- self.log('Received credentials from env.')
- return env_credentials
-
-        # try default profile from ~/.azure/credentials
- default_credentials = self._get_profile()
- if default_credentials:
- self.log('Retrieved default profile credentials from ~/.azure/credentials.')
- return default_credentials
-
- try:
- if HAS_AZURE_CLI_CORE:
- self.log('Retrieving credentials from AzureCLI profile')
- cli_credentials = self._get_azure_cli_credentials()
- return cli_credentials
- except CLIError as ce:
- self.log('Error getting AzureCLI profile credentials - {0}'.format(ce))
-
- return None
-
- def acquire_token_with_username_password(self, authority, resource, username, password, client_id, tenant):
- authority_uri = authority
-
- if tenant is not None:
- authority_uri = authority + '/' + tenant
-
- context = AuthenticationContext(authority_uri)
- token_response = context.acquire_token_with_username_password(resource, username, password, client_id)
-
- return AADTokenCredentials(token_response)
-
- def log(self, msg, pretty_print=False):
- pass
- # Use only during module development
- # if self.debug:
- # log_file = open('azure_rm.log', 'a')
- # if pretty_print:
- # log_file.write(json.dumps(msg, indent=4, sort_keys=True))
- # else:
- # log_file.write(msg + u'\n')
diff --git a/lib/ansible/module_utils/azure_rm_common_ext.py b/lib/ansible/module_utils/azure_rm_common_ext.py
deleted file mode 100644
index ab3c31c796..0000000000
--- a/lib/ansible/module_utils/azure_rm_common_ext.py
+++ /dev/null
@@ -1,211 +0,0 @@
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-import re
-from ansible.module_utils.common.dict_transformations import _camel_to_snake, _snake_to_camel
-from ansible.module_utils.six import string_types
-
-
-class AzureRMModuleBaseExt(AzureRMModuleBase):
-
- def inflate_parameters(self, spec, body, level):
- if isinstance(body, list):
- for item in body:
- self.inflate_parameters(spec, item, level)
- return
- for name in spec.keys():
- # first check if option was passed
- param = body.get(name)
- if param is None:
- if spec[name].get('purgeIfNone', False):
- body.pop(name, None)
- continue
- # check if pattern needs to be used
- pattern = spec[name].get('pattern', None)
- if pattern:
- if pattern == 'camelize':
- param = _snake_to_camel(param, True)
- elif isinstance(pattern, list):
- normalized = None
- for p in pattern:
- normalized = self.normalize_resource_id(param, p)
- body[name] = normalized
- if normalized is not None:
- break
- else:
- param = self.normalize_resource_id(param, pattern)
- body[name] = param
- disposition = spec[name].get('disposition', '*')
- if level == 0 and not disposition.startswith('/'):
- continue
- if disposition == '/':
- disposition = '/*'
- parts = disposition.split('/')
- if parts[0] == '':
- # should fail if level is > 0?
- parts.pop(0)
- target_dict = body
- elem = body.pop(name)
- while len(parts) > 1:
- target_dict = target_dict.setdefault(parts.pop(0), {})
- targetName = parts[0] if parts[0] != '*' else name
- target_dict[targetName] = elem
- if spec[name].get('options'):
- self.inflate_parameters(spec[name].get('options'), target_dict[targetName], level + 1)
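
inflate_parameters uses each option's disposition to move flat module parameters into the nested body expected by the REST API. A minimal standalone sketch of that nesting step with a hypothetical spec (not taken from any real module):

# Hypothetical spec: sku_name should land at body['sku']['name'], location stays at the top level.
spec = {'sku_name': {'disposition': '/sku/name'}, 'location': {'disposition': '/location'}}
body = {'sku_name': 'Standard', 'location': 'eastus'}

for name, opts in spec.items():
    parts = opts['disposition'].split('/')
    if parts[0] == '':
        parts.pop(0)
    target_dict = body
    elem = body.pop(name)
    while len(parts) > 1:
        target_dict = target_dict.setdefault(parts.pop(0), {})
    target_dict[parts[0] if parts[0] != '*' else name] = elem

print(body)  # {'sku': {'name': 'Standard'}, 'location': 'eastus'}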
-
- def normalize_resource_id(self, value, pattern):
- '''
-        Return a proper resource id string.
-
-        :param value: a resource name, a full resource id, or a dict containing parts matching the pattern.
-        :param pattern: pattern of the resource id, as used in the Azure Swagger specifications.
- '''
- value_dict = {}
- if isinstance(value, string_types):
- value_parts = value.split('/')
- if len(value_parts) == 1:
- value_dict['name'] = value
- else:
- pattern_parts = pattern.split('/')
- if len(value_parts) != len(pattern_parts):
- return None
- for i in range(len(value_parts)):
- if pattern_parts[i].startswith('{'):
- value_dict[pattern_parts[i][1:-1]] = value_parts[i]
- elif value_parts[i].lower() != pattern_parts[i].lower():
- return None
- elif isinstance(value, dict):
- value_dict = value
- else:
- return None
- if not value_dict.get('subscription_id'):
- value_dict['subscription_id'] = self.subscription_id
- if not value_dict.get('resource_group'):
- value_dict['resource_group'] = self.resource_group
-
- # check if any extra values passed
- for k in value_dict:
- if not ('{' + k + '}') in pattern:
- return None
- # format url
- return pattern.format(**value_dict)
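
Whatever form the input takes, the method ends by filling the Swagger-style pattern with the collected parts via pattern.format(**value_dict). A minimal illustration of that final step, with placeholder subscription and resource group values (in the module they default to self.subscription_id and self.resource_group):

pattern = ('/subscriptions/{subscription_id}/resourceGroups/{resource_group}'
           '/providers/Microsoft.Network/virtualNetworks/{name}')
value_dict = {
    'subscription_id': '00000000-0000-0000-0000-000000000000',  # placeholder
    'resource_group': 'myResourceGroup',                        # placeholder
    'name': 'myVnet',  # a bare resource name ends up here when a full id was not supplied
}
print(pattern.format(**value_dict))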
-
- def idempotency_check(self, old_params, new_params):
- '''
-        Return True if no difference is detected between old and new parameters, False otherwise. Fields from module_arg_spec are used to build comparison modifiers.
- :param old_params: old parameters dictionary, body from Get request.
- :param new_params: new parameters dictionary, unpacked module parameters.
- '''
- modifiers = {}
- result = {}
- self.create_compare_modifiers(self.module.argument_spec, '', modifiers)
- self.results['modifiers'] = modifiers
- return self.default_compare(modifiers, new_params, old_params, '', self.results)
-
- def create_compare_modifiers(self, arg_spec, path, result):
- for k in arg_spec.keys():
- o = arg_spec[k]
- updatable = o.get('updatable', True)
- comparison = o.get('comparison', 'default')
- disposition = o.get('disposition', '*')
- if disposition == '/':
- disposition = '/*'
- p = (path +
- ('/' if len(path) > 0 else '') +
- disposition.replace('*', k) +
- ('/*' if o['type'] == 'list' else ''))
- if comparison != 'default' or not updatable:
- result[p] = {'updatable': updatable, 'comparison': comparison}
- if o.get('options'):
- self.create_compare_modifiers(o.get('options'), p, result)
-
- def default_compare(self, modifiers, new, old, path, result):
- '''
- Default dictionary comparison.
- This function will work well with most of the Azure resources.
- It correctly handles "location" comparison.
-
- Value handling:
- - if "new" value is None, it will be taken from "old" dictionary if "incremental_update"
- is enabled.
- List handling:
- - if list contains "name" field it will be sorted by "name" before comparison is done.
- - if module has "incremental_update" set, items missing in the new list will be copied
- from the old list
-
- Warnings:
- If field is marked as non-updatable, appropriate warning will be printed out and
- "new" structure will be updated to old value.
-
- :modifiers: Optional dictionary of modifiers, where key is the path and value is dict of modifiers
- :param new: New version
- :param old: Old version
-
- Returns True if no difference between structures has been detected.
- Returns False if difference was detected.
- '''
- if new is None:
- return True
- elif isinstance(new, dict):
- comparison_result = True
- if not isinstance(old, dict):
- result['compare'].append('changed [' + path + '] old dict is null')
- comparison_result = False
- else:
- for k in set(new.keys()) | set(old.keys()):
- new_item = new.get(k, None)
- old_item = old.get(k, None)
- if new_item is None:
- if isinstance(old_item, dict):
- new[k] = old_item
- result['compare'].append('new item was empty, using old [' + path + '][ ' + k + ' ]')
- elif not self.default_compare(modifiers, new_item, old_item, path + '/' + k, result):
- comparison_result = False
- return comparison_result
- elif isinstance(new, list):
- comparison_result = True
- if not isinstance(old, list) or len(new) != len(old):
- result['compare'].append('changed [' + path + '] length is different or old value is null')
- comparison_result = False
- else:
- if isinstance(old[0], dict):
- key = None
- if 'id' in old[0] and 'id' in new[0]:
- key = 'id'
- elif 'name' in old[0] and 'name' in new[0]:
- key = 'name'
- else:
- key = next(iter(old[0]))
- new = sorted(new, key=lambda x: x.get(key, None))
- old = sorted(old, key=lambda x: x.get(key, None))
- else:
- new = sorted(new)
- old = sorted(old)
- for i in range(len(new)):
- if not self.default_compare(modifiers, new[i], old[i], path + '/*', result):
- comparison_result = False
- return comparison_result
- else:
- updatable = modifiers.get(path, {}).get('updatable', True)
- comparison = modifiers.get(path, {}).get('comparison', 'default')
- if comparison == 'ignore':
- return True
- elif comparison == 'default' or comparison == 'sensitive':
- if isinstance(old, string_types) and isinstance(new, string_types):
- new = new.lower()
- old = old.lower()
- elif comparison == 'location':
- if isinstance(old, string_types) and isinstance(new, string_types):
- new = new.replace(' ', '').lower()
- old = old.replace(' ', '').lower()
- if str(new) != str(old):
- result['compare'].append('changed [' + path + '] ' + str(new) + ' != ' + str(old) + ' - ' + str(comparison))
- if updatable:
- return False
- else:
- self.module.warn("property '" + path + "' cannot be updated (" + str(old) + "->" + str(new) + ")")
- return True
- else:
- return True
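
For scalar values the comparison lower-cases strings before comparing, and with comparison='location' it also strips spaces, so 'East US' and 'eastus' compare equal. A tiny standalone illustration of those normalization rules (this mirrors the scalar branch above rather than calling the module class):

def normalize(value, comparison):
    # 'default' and 'sensitive' lower-case strings; 'location' additionally strips spaces.
    if comparison == 'location':
        return value.replace(' ', '').lower()
    return value.lower()

print(normalize('East US', 'location') == normalize('eastus', 'location'))          # True -> no change
print(normalize('Standard_B1', 'default') == normalize('standard_b1', 'default'))   # True -> no change
print(normalize('East US', 'default') == normalize('eastus', 'default'))            # False -> reported as changed
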
diff --git a/lib/ansible/module_utils/azure_rm_common_rest.py b/lib/ansible/module_utils/azure_rm_common_rest.py
deleted file mode 100644
index 4fd7eaa3b4..0000000000
--- a/lib/ansible/module_utils/azure_rm_common_rest.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.azure_configuration import AzureConfiguration
- from msrest.service_client import ServiceClient
- from msrest.pipeline import ClientRawResponse
- from msrest.polling import LROPoller
- from msrestazure.polling.arm_polling import ARMPolling
- import uuid
- import json
-except ImportError:
- # This is handled in azure_rm_common
- AzureConfiguration = object
-
-ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION)
-
-
-class GenericRestClientConfiguration(AzureConfiguration):
-
- def __init__(self, credentials, subscription_id, base_url=None):
-
- if credentials is None:
- raise ValueError("Parameter 'credentials' must not be None.")
- if subscription_id is None:
- raise ValueError("Parameter 'subscription_id' must not be None.")
- if not base_url:
- base_url = 'https://management.azure.com'
-
- super(GenericRestClientConfiguration, self).__init__(base_url)
-
- self.add_user_agent(ANSIBLE_USER_AGENT)
-
- self.credentials = credentials
- self.subscription_id = subscription_id
-
-
-class GenericRestClient(object):
-
- def __init__(self, credentials, subscription_id, base_url=None):
- self.config = GenericRestClientConfiguration(credentials, subscription_id, base_url)
- self._client = ServiceClient(self.config.credentials, self.config)
- self.models = None
-
- def query(self, url, method, query_parameters, header_parameters, body, expected_status_codes, polling_timeout, polling_interval):
- # Construct and send request
- operation_config = {}
-
- request = None
-
- if header_parameters is None:
- header_parameters = {}
-
- header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
-
- if method == 'GET':
- request = self._client.get(url, query_parameters)
- elif method == 'PUT':
- request = self._client.put(url, query_parameters)
- elif method == 'POST':
- request = self._client.post(url, query_parameters)
- elif method == 'HEAD':
- request = self._client.head(url, query_parameters)
- elif method == 'PATCH':
- request = self._client.patch(url, query_parameters)
- elif method == 'DELETE':
- request = self._client.delete(url, query_parameters)
- elif method == 'MERGE':
- request = self._client.merge(url, query_parameters)
-
- response = self._client.send(request, header_parameters, body, **operation_config)
-
- if response.status_code not in expected_status_codes:
- exp = CloudError(response)
- exp.request_id = response.headers.get('x-ms-request-id')
- raise exp
- elif response.status_code == 202 and polling_timeout > 0:
- def get_long_running_output(response):
- return response
- poller = LROPoller(self._client,
- ClientRawResponse(None, response),
- get_long_running_output,
- ARMPolling(polling_interval, **operation_config))
- response = self.get_poller_result(poller, polling_timeout)
-
- return response
-
- def get_poller_result(self, poller, timeout):
- try:
- poller.wait(timeout=timeout)
- return poller.result()
- except Exception as exc:
- raise
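
A hedged sketch of how a module-side caller might drive this client for a raw GET; it assumes valid Azure credentials and a subscription id are already available (for example from AzureRMAuth in azure_rm_common above) and that these module_utils, removed here and now shipped in the azure.azcollection collection, are still importable:

# Sketch only: requires real credentials and performs a live call against Azure.
from ansible.module_utils.azure_rm_common_rest import GenericRestClient

def list_resource_groups(credentials, subscription_id):
    client = GenericRestClient(credentials, subscription_id)
    url = '/subscriptions/{0}/resourcegroups'.format(subscription_id)
    response = client.query(url=url,
                            method='GET',
                            query_parameters={'api-version': '2017-05-10'},
                            header_parameters={'Content-Type': 'application/json; charset=utf-8'},
                            body=None,
                            expected_status_codes=[200],
                            polling_timeout=0,
                            polling_interval=0)
    return response.text
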
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_acs.py b/lib/ansible/modules/cloud/azure/azure_rm_acs.py
deleted file mode 100644
index 440c0c00f4..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_acs.py
+++ /dev/null
@@ -1,745 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Julien Stroheker <juliens@microsoft.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_acs
-version_added: "2.4"
-short_description: Manage an Azure Container Service (ACS) instance
-description:
-    - Create, update and delete an Azure Container Service (ACS) instance.
-
-options:
- resource_group:
- description:
-            - Name of the resource group where the Container Service exists or will be created.
- required: true
- name:
- description:
-            - Name of the Azure Container Service (ACS) instance.
- required: true
- state:
- description:
- - Assert the state of the ACS. Use C(present) to create or update an ACS and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
- location:
- description:
- - Valid azure location. Defaults to location of the resource group.
- orchestration_platform:
- description:
- - Specifies the Container Orchestration Platform to use. Currently can be either C(DCOS), C(Kubernetes) or C(Swarm).
- - The I(service_principal) must be defined if set to C(Kubernetes).
- choices:
- - 'DCOS'
- - 'Kubernetes'
- - 'Swarm'
- required: true
- master_profile:
- description:
- - Master profile suboptions.
- required: true
- suboptions:
- count:
- description:
- - Number of masters (VMs) in the container service cluster. Allowed values are C(1), C(3), and C(5).
- required: true
- choices:
- - 1
- - 3
- - 5
- vm_size:
- description:
-                    - The VM size of each of the master VMs (e.g. C(Standard_F1) / C(Standard_D2v2)).
- required: true
- version_added: 2.5
- dns_prefix:
- description:
- - The DNS Prefix to use for the Container Service master nodes.
- required: true
- linux_profile:
- description:
- - The Linux profile suboptions.
- required: true
- suboptions:
- admin_username:
- description:
- - The Admin Username for the Cluster.
- required: true
- ssh_key:
- description:
- - The Public SSH Key used to access the cluster.
- required: true
- agent_pool_profiles:
- description:
- - The agent pool profile suboptions.
- required: true
- suboptions:
- name:
- description:
- - Unique name of the agent pool profile in the context of the subscription and resource group.
- required: true
- count:
- description:
- - Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive).
- required: true
- dns_prefix:
- description:
- - The DNS Prefix given to Agents in this Agent Pool.
- required: true
- vm_size:
- description:
-                    - The VM size of each of the Agent Pool VMs (e.g. C(Standard_F1) / C(Standard_D2v2)).
- required: true
- service_principal:
- description:
- - The service principal suboptions.
- - Required when I(orchestration_platform=Kubernetes).
- suboptions:
- client_id:
- description:
- - The ID for the Service Principal.
- client_secret:
- description:
- - The secret password associated with the service principal.
- diagnostics_profile:
- description:
-            - Whether VM diagnostics should be enabled for the Container Service VMs.
- required: true
- type: bool
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Julien Stroheker (@julienstroheker)
-
-'''
-
-EXAMPLES = '''
- - name: Create an azure container services instance running Kubernetes
- azure_rm_acs:
- name: acctestcontservice1
- location: eastus
- resource_group: myResourceGroup
- orchestration_platform: Kubernetes
- master_profile:
- - count: 3
- dns_prefix: acsk8smasterdns
- vm_size: Standard_D2_v2
- linux_profile:
- - admin_username: azureuser
- ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
- service_principal:
- - client_id: "cf72ca99-f6b9-4004-b0e0-bee10c521948"
- client_secret: "mySPNp@ssw0rd!"
- agent_pool_profiles:
- - name: default
- count: 5
- dns_prefix: acsk8sagent
- vm_size: Standard_D2_v2
- diagnostics_profile: false
- tags:
- Environment: Production
-
- - name: Create an azure container services instance running DCOS
- azure_rm_acs:
- name: acctestcontservice2
- location: eastus
- resource_group: myResourceGroup
- orchestration_platform: DCOS
- master_profile:
- - count: 3
- dns_prefix: acsdcosmasterdns
- vm_size: Standard_D2_v2
- linux_profile:
- - admin_username: azureuser
- ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
- agent_pool_profiles:
- - name: default
- count: 5
- dns_prefix: acscdcosagent
- vm_size: Standard_D2_v2
- diagnostics_profile: false
- tags:
- Environment: Production
-
- - name: Create an azure container services instance running Swarm
- azure_rm_acs:
- name: acctestcontservice3
- location: eastus
- resource_group: myResourceGroup
- orchestration_platform: Swarm
- master_profile:
- - count: 3
- dns_prefix: acsswarmmasterdns
- vm_size: Standard_D2_v2
- linux_profile:
- - admin_username: azureuser
- ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
- agent_pool_profiles:
- - name: default
- count: 5
- dns_prefix: acsswarmagent
- vm_size: Standard_D2_v2
- diagnostics_profile: false
- tags:
- Environment: Production
-
-# Deletes the specified container service in the specified subscription and resource group.
-# The operation does not delete other resources created as part of creating a container service,
-# including storage accounts, VMs, and availability sets. All the other resources created with the container
-# service are part of the same resource group and can be deleted individually.
- - name: Remove an azure container services instance
- azure_rm_acs:
- name: acctestcontservice3
- location: eastus
- resource_group: myResourceGroup
- state: absent
- orchestration_platform: Swarm
- master_profile:
- - count: 1
- vm_size: Standard_A0
- dns_prefix: acstestingmasterdns5
- linux_profile:
- - admin_username: azureuser
- ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
- agent_pool_profiles:
- - name: default
- count: 4
- dns_prefix: acctestagent15
- vm_size: Standard_A0
- diagnostics_profile: false
- tags:
- Ansible: azure_rm_acs
-'''
-RETURN = '''
-state:
- description: Current state of the Azure Container Service(ACS).
- returned: always
- type: dict
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.containerservice.models import (
- ContainerService, ContainerServiceOrchestratorProfile, ContainerServiceCustomProfile,
- ContainerServiceServicePrincipalProfile, ContainerServiceMasterProfile,
- ContainerServiceAgentPoolProfile, ContainerServiceWindowsProfile,
- ContainerServiceLinuxProfile, ContainerServiceSshConfiguration,
- ContainerServiceDiagnosticsProfile, ContainerServiceSshPublicKey,
- ContainerServiceVMDiagnostics
- )
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def create_agent_pool_profile_instance(agentpoolprofile):
- '''
- Helper method to serialize a dict to a ContainerServiceAgentPoolProfile
- :param: agentpoolprofile: dict with the parameters to setup the ContainerServiceAgentPoolProfile
- :return: ContainerServiceAgentPoolProfile
- '''
- return ContainerServiceAgentPoolProfile(
- name=agentpoolprofile['name'],
- count=agentpoolprofile['count'],
- dns_prefix=agentpoolprofile['dns_prefix'],
- vm_size=agentpoolprofile['vm_size']
- )
-
-
-def create_orch_platform_instance(orchestrator):
- '''
- Helper method to serialize a dict to a ContainerServiceOrchestratorProfile
- :param: orchestrator: dict with the parameters to setup the ContainerServiceOrchestratorProfile
- :return: ContainerServiceOrchestratorProfile
- '''
- return ContainerServiceOrchestratorProfile(
- orchestrator_type=orchestrator,
- )
-
-
-def create_service_principal_profile_instance(spnprofile):
- '''
- Helper method to serialize a dict to a ContainerServiceServicePrincipalProfile
- :param: spnprofile: dict with the parameters to setup the ContainerServiceServicePrincipalProfile
- :return: ContainerServiceServicePrincipalProfile
- '''
- return ContainerServiceServicePrincipalProfile(
- client_id=spnprofile[0]['client_id'],
- secret=spnprofile[0]['client_secret']
- )
-
-
-def create_linux_profile_instance(linuxprofile):
- '''
- Helper method to serialize a dict to a ContainerServiceLinuxProfile
- :param: linuxprofile: dict with the parameters to setup the ContainerServiceLinuxProfile
- :return: ContainerServiceLinuxProfile
- '''
- return ContainerServiceLinuxProfile(
- admin_username=linuxprofile[0]['admin_username'],
- ssh=create_ssh_configuration_instance(linuxprofile[0]['ssh_key'])
- )
-
-
-def create_ssh_configuration_instance(sshconf):
- '''
- Helper method to serialize a dict to a ContainerServiceSshConfiguration
- :param: sshconf: dict with the parameters to setup the ContainerServiceSshConfiguration
- :return: ContainerServiceSshConfiguration
- '''
- listssh = []
- key = ContainerServiceSshPublicKey(key_data=str(sshconf))
- listssh.append(key)
- return ContainerServiceSshConfiguration(
- public_keys=listssh
- )
-
-
-def create_master_profile_instance(masterprofile):
- '''
- Helper method to serialize a dict to a ContainerServiceMasterProfile
-    Note: first_consecutive_static_ip is explicitly set to None because the Azure service does not accept
-    a request body containing this property. This appears to be an inconsistency between the Azure client SDK
-    and the Azure service.
- :param: masterprofile: dict with the parameters to setup the ContainerServiceMasterProfile
- :return: ContainerServiceMasterProfile
- '''
- return ContainerServiceMasterProfile(
- count=masterprofile[0]['count'],
- dns_prefix=masterprofile[0]['dns_prefix'],
- vm_size=masterprofile[0]['vm_size'],
- first_consecutive_static_ip=None
- )
-
-
-def create_diagnostics_profile_instance(diagprofile):
- '''
- Helper method to serialize a dict to a ContainerServiceDiagnosticsProfile
- :param: diagprofile: dict with the parameters to setup the ContainerServiceDiagnosticsProfile
- :return: ContainerServiceDiagnosticsProfile
- '''
- return ContainerServiceDiagnosticsProfile(
- vm_diagnostics=create_vm_diagnostics_instance(diagprofile)
- )
-
-
-def create_vm_diagnostics_instance(vmdiag):
- '''
- Helper method to serialize a dict to a ContainerServiceVMDiagnostics
- :param: vmdiag: dict with the parameters to setup the ContainerServiceVMDiagnostics
- :return: ContainerServiceVMDiagnostics
- '''
- return ContainerServiceVMDiagnostics(
- enabled=vmdiag
- )
-
-
-def create_acs_dict(acs):
- '''
- Helper method to deserialize a ContainerService to a dict
- :param: acs: ContainerService or AzureOperationPoller with the Azure callback object
- :return: dict with the state on Azure
- '''
- service_principal_profile_dict = None
- if acs.orchestrator_profile.orchestrator_type == 'Kubernetes':
- service_principal_profile_dict = create_service_principal_profile_dict(acs.service_principal_profile)
-
- return dict(
- id=acs.id,
- name=acs.name,
- location=acs.location,
- tags=acs.tags,
- orchestrator_profile=create_orchestrator_profile_dict(acs.orchestrator_profile),
- master_profile=create_master_profile_dict(acs.master_profile),
- linux_profile=create_linux_profile_dict(acs.linux_profile),
- service_principal_profile=service_principal_profile_dict,
-        diagnostics_profile=create_diagnostics_profile_dict(acs.diagnostics_profile),
- provisioning_state=acs.provisioning_state,
- agent_pool_profiles=create_agent_pool_profiles_dict(acs.agent_pool_profiles),
- type=acs.type
- )
-
-
-def create_linux_profile_dict(linuxprofile):
- '''
- Helper method to deserialize a ContainerServiceLinuxProfile to a dict
- :param: linuxprofile: ContainerServiceLinuxProfile with the Azure callback object
- :return: dict with the state on Azure
- '''
- return dict(
- ssh_key=linuxprofile.ssh.public_keys[0].key_data,
- admin_username=linuxprofile.admin_username
- )
-
-
-def create_master_profile_dict(masterprofile):
- '''
- Helper method to deserialize a ContainerServiceMasterProfile to a dict
- :param: masterprofile: ContainerServiceMasterProfile with the Azure callback object
- :return: dict with the state on Azure
- '''
- return dict(
- count=masterprofile.count,
- fqdn=masterprofile.fqdn,
- vm_size=masterprofile.vm_size,
- dns_prefix=masterprofile.dns_prefix
- )
-
-
-def create_service_principal_profile_dict(serviceprincipalprofile):
- '''
- Helper method to deserialize a ContainerServiceServicePrincipalProfile to a dict
-    Note: For security reasons, the service principal secret is intentionally omitted.
- :param: serviceprincipalprofile: ContainerServiceServicePrincipalProfile with the Azure callback object
- :return: dict with the state on Azure
- '''
- return dict(
- client_id=serviceprincipalprofile.client_id
- )
-
-
-def create_diagnostics_profile_dict(diagnosticsprofile):
-    '''
-    Helper method to deserialize a ContainerServiceDiagnosticsProfile to a dict
-    :param: diagnosticsprofile: ContainerServiceDiagnosticsProfile with the Azure callback object
- :return: dict with the state on Azure
- '''
- return dict(
- vm_diagnostics=diagnosticsprofile.vm_diagnostics.enabled
- )
-
-
-def create_orchestrator_profile_dict(orchestratorprofile):
- '''
- Helper method to deserialize a ContainerServiceOrchestratorProfile to a dict
- :param: orchestratorprofile: ContainerServiceOrchestratorProfile with the Azure callback object
- :return: dict with the state on Azure
- '''
- return dict(
- orchestrator_type=str(orchestratorprofile.orchestrator_type)
- )
-
-
-def create_agent_pool_profiles_dict(agentpoolprofiles):
- '''
- Helper method to deserialize a ContainerServiceAgentPoolProfile to a dict
- :param: agentpoolprofiles: ContainerServiceAgentPoolProfile with the Azure callback object
- :return: dict with the state on Azure
- '''
- return [dict(
- count=profile.count,
- vm_size=profile.vm_size,
- name=profile.name,
- dns_prefix=profile.dns_prefix,
- fqdn=profile.fqdn
- ) for profile in agentpoolprofiles]
-
-
-class AzureRMContainerService(AzureRMModuleBase):
- """Configuration class for an Azure RM container service resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- ),
- location=dict(
- type='str'
- ),
- orchestration_platform=dict(
- type='str',
- required=True,
- choices=['DCOS', 'Kubernetes', 'Swarm']
- ),
- master_profile=dict(
- type='list',
- required=True
- ),
- linux_profile=dict(
- type='list',
- required=True
- ),
- agent_pool_profiles=dict(
- type='list',
- required=True
- ),
- service_principal=dict(
- type='list'
- ),
- diagnostics_profile=dict(
- type='bool',
- required=True
- )
- )
-
- self.resource_group = None
- self.name = None
- self.location = None
- self.tags = None
- self.state = None
- self.orchestration_platform = None
- self.master_profile = None
- self.linux_profile = None
- self.agent_pool_profiles = None
- self.service_principal = None
- self.diagnostics_profile = None
-
- self.results = dict(changed=False, state=dict())
-
- super(AzureRMContainerService, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- resource_group = None
- response = None
- results = dict()
- to_be_updated = False
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- # Check if the ACS instance already present in the RG
- if self.state == 'present':
-
- if self.orchestration_platform == 'Kubernetes':
- if not self.service_principal:
- self.fail('service_principal should be specified when using Kubernetes')
- if not self.service_principal[0].get('client_id'):
- self.fail('service_principal.client_id should be specified when using Kubernetes')
- if not self.service_principal[0].get('client_secret'):
- self.fail('service_principal.client_secret should be specified when using Kubernetes')
-
- mastercount = self.master_profile[0].get('count')
-            if mastercount not in (1, 3, 5):
-                self.fail('Invalid master count {0}: the number of masters must be 1, 3 or 5'.format(mastercount))
-
-            # For now there cannot be more than one agent pool; remove this check in the future if that changes
-            agentpoolcount = len(self.agent_pool_profiles)
-            if agentpoolcount > 1:
-                self.fail('You cannot specify more than one agent_pool_profile')
-
- response = self.get_acs()
- self.results['state'] = response
- if not response:
- to_be_updated = True
-
- else:
- self.log('Results : {0}'.format(response))
- update_tags, response['tags'] = self.update_tags(response['tags'])
-
- if response['provisioning_state'] == "Succeeded":
- if update_tags:
- to_be_updated = True
-
- def is_property_changed(profile, property, ignore_case=False):
- base = response[profile].get(property)
- new = getattr(self, profile)[0].get(property)
- if ignore_case:
- return base.lower() != new.lower()
- else:
- return base != new
-
- # Cannot Update the master count for now // Uncomment this block in the future to support it
- if is_property_changed('master_profile', 'count'):
- # self.log(("Master Profile Count Diff, Was {0} / Now {1}"
- # .format(response['master_profile'].count,
- # self.master_profile[0].get('count'))))
- # to_be_updated = True
- self.module.warn("master_profile.count cannot be updated")
-
- # Cannot Update the master vm_size for now. Could be a client SDK bug
- # Uncomment this block in the future to support it
- if is_property_changed('master_profile', 'vm_size', True):
- # self.log(("Master Profile VM Size Diff, Was {0} / Now {1}"
- # .format(response['master_profile'].get('vm_size'),
- # self.master_profile[0].get('vm_size'))))
- # to_be_updated = True
- self.module.warn("master_profile.vm_size cannot be updated")
-
- # Cannot Update the SSH Key for now // Uncomment this block in the future to support it
- if is_property_changed('linux_profile', 'ssh_key'):
- # self.log(("Linux Profile Diff SSH, Was {0} / Now {1}"
- # .format(response['linux_profile'].ssh.public_keys[0].key_data,
- # self.linux_profile[0].get('ssh_key'))))
- # to_be_updated = True
- self.module.warn("linux_profile.ssh_key cannot be updated")
-
- # self.log("linux_profile response : {0}".format(response['linux_profile'].get('admin_username')))
- # self.log("linux_profile self : {0}".format(self.linux_profile[0].get('admin_username')))
- # Cannot Update the Username for now // Uncomment this block in the future to support it
- if is_property_changed('linux_profile', 'admin_username'):
- # self.log(("Linux Profile Diff User, Was {0} / Now {1}"
- # .format(response['linux_profile'].admin_username,
- # self.linux_profile[0].get('admin_username'))))
- # to_be_updated = True
- self.module.warn("linux_profile.admin_username cannot be updated")
-
-                    # Cannot have more than one agent pool profile for now // Uncomment this block in the future to support it
- # if len(response['agent_pool_profiles']) != len(self.agent_pool_profiles):
- # self.log("Agent Pool count is diff, need to updated")
- # to_be_updated = True
-
- for profile_result in response['agent_pool_profiles']:
- matched = False
- for profile_self in self.agent_pool_profiles:
- if profile_result['name'] == profile_self['name']:
- matched = True
- if profile_result['count'] != profile_self['count'] or profile_result['vm_size'] != \
- profile_self['vm_size']:
- self.log(("Agent Profile Diff - Count was {0} / Now {1} - Vm_size was {2} / Now {3}"
- .format(profile_result['count'], profile_self['count'],
- profile_result['vm_size'], profile_self['vm_size'])))
- to_be_updated = True
- if not matched:
- self.log("Agent Pool not found")
- to_be_updated = True
-
- if to_be_updated:
- self.log("Need to Create / Update the ACS instance")
-
- if self.check_mode:
- return self.results
-
- self.results['state'] = self.create_update_acs()
- self.results['changed'] = True
-
- self.log("Creation / Update done")
- elif self.state == 'absent':
- if self.check_mode:
- return self.results
- self.delete_acs()
- self.log("ACS instance deleted")
-
- return self.results
-
- def create_update_acs(self):
- '''
- Creates or updates a container service with the specified configuration of orchestrator, masters, and agents.
-
- :return: deserialized ACS instance state dictionary
- '''
- self.log("Creating / Updating the ACS instance {0}".format(self.name))
-
- service_principal_profile = None
- agentpools = []
-
- if self.agent_pool_profiles:
- for profile in self.agent_pool_profiles:
- self.log("Trying to push the following Profile {0}".format(profile))
- agentpools.append(create_agent_pool_profile_instance(profile))
-
- if self.orchestration_platform == 'Kubernetes':
- service_principal_profile = create_service_principal_profile_instance(self.service_principal)
-
- parameters = ContainerService(
- location=self.location,
- tags=self.tags,
- orchestrator_profile=create_orch_platform_instance(self.orchestration_platform),
- service_principal_profile=service_principal_profile,
- linux_profile=create_linux_profile_instance(self.linux_profile),
- master_profile=create_master_profile_instance(self.master_profile),
- agent_pool_profiles=agentpools,
- diagnostics_profile=create_diagnostics_profile_instance(self.diagnostics_profile)
- )
-
- # self.log("orchestrator_profile : {0}".format(parameters.orchestrator_profile))
- # self.log("service_principal_profile : {0}".format(parameters.service_principal_profile))
- # self.log("linux_profile : {0}".format(parameters.linux_profile))
- # self.log("ssh from yaml : {0}".format(results.get('linux_profile')[0]))
- # self.log("ssh : {0}".format(parameters.linux_profile.ssh))
- # self.log("master_profile : {0}".format(parameters.master_profile))
- # self.log("agent_pool_profiles : {0}".format(parameters.agent_pool_profiles))
- # self.log("vm_diagnostics : {0}".format(parameters.diagnostics_profile.vm_diagnostics))
-
- try:
- poller = self.containerservice_client.container_services.create_or_update(self.resource_group, self.name,
- parameters)
- response = self.get_poller_result(poller)
- except CloudError as exc:
- self.log('Error attempting to create the ACS instance.')
- self.fail("Error creating the ACS instance: {0}".format(str(exc)))
- return create_acs_dict(response)
-
- def delete_acs(self):
- '''
- Deletes the specified container service in the specified subscription and resource group.
- The operation does not delete other resources created as part of creating a container service,
- including storage accounts, VMs, and availability sets.
- All the other resources created with the container service are part of the same resource group and can be deleted individually.
-
- :return: True
- '''
- self.log("Deleting the ACS instance {0}".format(self.name))
- try:
- poller = self.containerservice_client.container_services.delete(self.resource_group, self.name)
- self.get_poller_result(poller)
- except CloudError as e:
- self.log('Error attempting to delete the ACS instance.')
- self.fail("Error deleting the ACS instance: {0}".format(str(e)))
-
- return True
-
- def get_acs(self):
- '''
- Gets the properties of the specified container service.
-
- :return: deserialized ACS instance state dictionary
- '''
- self.log("Checking if the ACS instance {0} is present".format(self.name))
- found = False
- try:
- response = self.containerservice_client.container_services.get(self.resource_group, self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("ACS instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the ACS instance.')
- if found is True:
- return create_acs_dict(response)
- else:
- return False
-
-
-def main():
- """Main execution"""
- AzureRMContainerService()
-
-
-if __name__ == '__main__':
- main()
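
The get_acs lookup above is what drives the module's idempotence: it returns the deserialized resource when the container service exists and False when the SDK raises CloudError. Below is a minimal standalone sketch of that fetch-or-absent pattern; FakeClient and ResourceNotFound are placeholder names for illustration, not Azure SDK types.

class ResourceNotFound(Exception):
    """Placeholder for the SDK error raised when a resource does not exist."""
    pass


class FakeClient(object):
    """Placeholder client holding known resources keyed by (resource_group, name)."""

    def __init__(self, known):
        self._known = known

    def get(self, resource_group, name):
        try:
            return self._known[(resource_group, name)]
        except KeyError:
            raise ResourceNotFound("{0}/{1} not found".format(resource_group, name))


def get_current_state(client, resource_group, name):
    """Return the existing resource as a dict, or False when it does not exist."""
    try:
        response = client.get(resource_group, name)
        return dict(response)  # stands in for create_acs_dict(response)
    except ResourceNotFound:
        return False


if __name__ == '__main__':
    client = FakeClient({('rg', 'acs1'): {'name': 'acs1', 'provisioning_state': 'Succeeded'}})
    print(get_current_state(client, 'rg', 'acs1'))   # existing resource -> state dict
    print(get_current_state(client, 'rg', 'other'))  # missing resource  -> False
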
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_aks.py b/lib/ansible/modules/cloud/azure/azure_rm_aks.py
deleted file mode 100644
index ad509db340..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_aks.py
+++ /dev/null
@@ -1,841 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Sertac Ozercan, <seozerca@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_aks
-version_added: "2.6"
-short_description: Manage a managed Azure Container Service (AKS) instance
-description:
- - Create, update and delete a managed Azure Container Service (AKS) instance.
-
-options:
- resource_group:
- description:
- - Name of a resource group where the managed Azure Container Services (AKS) exists or will be created.
- required: true
- name:
- description:
- - Name of the managed Azure Container Services (AKS) instance.
- required: true
- state:
- description:
- - Assert the state of the AKS. Use C(present) to create or update an AKS and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
- location:
- description:
- - Valid azure location. Defaults to location of the resource group.
- dns_prefix:
- description:
- - DNS prefix specified when creating the managed cluster.
- kubernetes_version:
- description:
- - Version of Kubernetes specified when creating the managed cluster.
- linux_profile:
- description:
- - The Linux profile suboptions.
- suboptions:
- admin_username:
- description:
- - The Admin Username for the cluster.
- required: true
- ssh_key:
- description:
- - The Public SSH Key used to access the cluster.
- required: true
- agent_pool_profiles:
- description:
- - The agent pool profile suboptions.
- suboptions:
- name:
- description:
- - Unique name of the agent pool profile in the context of the subscription and resource group.
- required: true
- count:
- description:
- - Number of agents (VMs) to host docker containers.
- - Allowed values must be in the range of C(1) to C(100) (inclusive).
- required: true
- vm_size:
- description:
- - The VM Size of each of the Agent Pool VM's (e.g. C(Standard_F1) / C(Standard_D2v2)).
- required: true
- vnet_subnet_id:
- description:
-                    - ID of the subnet for the Agent Pool VMs' network interfaces.
- os_disk_size_gb:
- description:
- - Size of the OS disk.
- service_principal:
- description:
- - The service principal suboptions.
- suboptions:
- client_id:
- description:
- - The ID for the Service Principal.
- required: true
- client_secret:
- description:
- - The secret password associated with the service principal.
- required: true
- enable_rbac:
- description:
- - Enable RBAC.
- - Existing non-RBAC enabled AKS clusters cannot currently be updated for RBAC use.
- type: bool
- default: no
- version_added: "2.8"
- network_profile:
- description:
- - Profile of network configuration.
- suboptions:
- network_plugin:
- description:
- - Network plugin used for building Kubernetes network.
-                    - This property cannot be changed.
- - With C(kubenet), nodes get an IP address from the Azure virtual network subnet.
- - AKS features such as Virtual Nodes or network policies aren't supported with C(kubenet).
-                    - C(azure) enables Azure Container Networking Interface (CNI); every pod gets an IP address from the subnet and can be accessed directly.
- default: kubenet
- choices:
- - azure
- - kubenet
- network_policy:
- description: Network policy used for building Kubernetes network.
- choices:
- - azure
- - calico
- pod_cidr:
- description:
- - A CIDR notation IP range from which to assign pod IPs when I(network_plugin=kubenet) is used.
- - It should be a large address space that isn't in use elsewhere in your network environment.
- - This address range must be large enough to accommodate the number of nodes that you expect to scale up to.
- default: "10.244.0.0/16"
- service_cidr:
- description:
- - A CIDR notation IP range from which to assign service cluster IPs.
- - It must not overlap with any Subnet IP ranges.
- - It should be the *.10 address of your service IP address range.
- default: "10.0.0.0/16"
- dns_service_ip:
- description:
- - An IP address assigned to the Kubernetes DNS service.
- - It must be within the Kubernetes service address range specified in serviceCidr.
- default: "10.0.0.10"
- docker_bridge_cidr:
- description:
- - A CIDR notation IP range assigned to the Docker bridge network.
- - It must not overlap with any Subnet IP ranges or the Kubernetes service address range.
- default: "172.17.0.1/16"
- version_added: "2.8"
- aad_profile:
- description:
- - Profile of Azure Active Directory configuration.
- suboptions:
- client_app_id:
- description: The client AAD application ID.
- server_app_id:
- description: The server AAD application ID.
- server_app_secret:
- description: The server AAD application secret.
- tenant_id:
- description:
- - The AAD tenant ID to use for authentication.
- - If not specified, will use the tenant of the deployment subscription.
- version_added: "2.8"
- addon:
- description:
- - Profile of managed cluster add-on.
- - Key can be C(http_application_routing), C(monitoring), C(virtual_node).
-            - Value must be a dict that contains a bool variable C(enabled).
- type: dict
- suboptions:
- http_application_routing:
- description:
- - The HTTP application routing solution makes it easy to access applications that are deployed to your cluster.
- type: dict
- suboptions:
- enabled:
- description:
-                            - Whether the solution is enabled.
- type: bool
- monitoring:
- description:
- - It gives you performance visibility by collecting memory and processor metrics from controllers, nodes,
- and containers that are available in Kubernetes through the Metrics API.
- type: dict
- suboptions:
- enabled:
- description:
-                            - Whether the solution is enabled.
- type: bool
- log_analytics_workspace_resource_id:
- description:
- - Where to store the container metrics.
- virtual_node:
- description:
- - With virtual nodes, you have quick provisioning of pods, and only pay per second for their execution time.
- - You don't need to wait for Kubernetes cluster autoscaler to deploy VM compute nodes to run the additional pods.
- type: dict
- suboptions:
- enabled:
- description:
-                            - Whether the solution is enabled.
- type: bool
- subnet_resource_id:
- description:
- - Subnet associated to the cluster.
- version_added: "2.8"
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Sertac Ozercan (@sozercan)
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
- - name: Create a managed Azure Container Services (AKS) instance
- azure_rm_aks:
- name: myAKS
- location: eastus
- resource_group: myResourceGroup
- dns_prefix: akstest
- kubernetes_version: 1.14.6
- linux_profile:
- admin_username: azureuser
- ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAA...
- service_principal:
- client_id: "cf72ca99-f6b9-4004-b0e0-bee10c521948"
- client_secret: "mySPNp@ssw0rd!"
- agent_pool_profiles:
- - name: default
- count: 5
- vm_size: Standard_D2_v2
- tags:
- Environment: Production
-
- - name: Remove a managed Azure Container Services (AKS) instance
- azure_rm_aks:
- name: myAKS
- resource_group: myResourceGroup
- state: absent
-'''
-RETURN = '''
-state:
- description: Current state of the Azure Container Service (AKS).
- returned: always
- type: dict
- example:
- agent_pool_profiles:
- - count: 1
- dns_prefix: Null
- name: default
- os_disk_size_gb: Null
- os_type: Linux
- ports: Null
- storage_profile: ManagedDisks
- vm_size: Standard_DS1_v2
- vnet_subnet_id: Null
- changed: false
- dns_prefix: aks9860bdcd89
- id: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.ContainerService/managedClusters/aks9860bdc"
- kube_config: "......"
- kubernetes_version: 1.14.6
- linux_profile:
- admin_username: azureuser
- ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADA.....
- location: eastus
- name: aks9860bdc
- provisioning_state: Succeeded
- service_principal_profile:
- client_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- tags: {}
- type: Microsoft.ContainerService/ManagedClusters
-'''
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def create_aks_dict(aks):
- '''
- Helper method to deserialize a ContainerService to a dict
- :param: aks: ContainerService or AzureOperationPoller with the Azure callback object
- :return: dict with the state on Azure
- '''
-
- return dict(
- id=aks.id,
- name=aks.name,
- location=aks.location,
- dns_prefix=aks.dns_prefix,
- kubernetes_version=aks.kubernetes_version,
- tags=aks.tags,
- linux_profile=create_linux_profile_dict(aks.linux_profile),
- service_principal_profile=create_service_principal_profile_dict(
- aks.service_principal_profile),
- provisioning_state=aks.provisioning_state,
- agent_pool_profiles=create_agent_pool_profiles_dict(
- aks.agent_pool_profiles),
- type=aks.type,
- kube_config=aks.kube_config,
- enable_rbac=aks.enable_rbac,
- network_profile=create_network_profiles_dict(aks.network_profile),
- aad_profile=create_aad_profiles_dict(aks.aad_profile),
- addon=create_addon_dict(aks.addon_profiles),
- fqdn=aks.fqdn,
- node_resource_group=aks.node_resource_group
- )
-
-
-def create_network_profiles_dict(network):
- return dict(
- network_plugin=network.network_plugin,
- network_policy=network.network_policy,
- pod_cidr=network.pod_cidr,
- service_cidr=network.service_cidr,
- dns_service_ip=network.dns_service_ip,
- docker_bridge_cidr=network.docker_bridge_cidr
- ) if network else dict()
-
-
-def create_aad_profiles_dict(aad):
- return aad.as_dict() if aad else dict()
-
-
-def create_addon_dict(addon):
- result = dict()
- addon = addon or dict()
- for key in addon.keys():
- result[key] = addon[key].config
- result[key]['enabled'] = addon[key].enabled
- return result
-
-
-def create_linux_profile_dict(linuxprofile):
- '''
- Helper method to deserialize a ContainerServiceLinuxProfile to a dict
- :param: linuxprofile: ContainerServiceLinuxProfile with the Azure callback object
- :return: dict with the state on Azure
- '''
- return dict(
- ssh_key=linuxprofile.ssh.public_keys[0].key_data,
- admin_username=linuxprofile.admin_username
- )
-
-
-def create_service_principal_profile_dict(serviceprincipalprofile):
- '''
- Helper method to deserialize a ContainerServiceServicePrincipalProfile to a dict
-    Note: For security reasons, the service principal secret is skipped on purpose.
- :param: serviceprincipalprofile: ContainerServiceServicePrincipalProfile with the Azure callback object
- :return: dict with the state on Azure
- '''
- return dict(
- client_id=serviceprincipalprofile.client_id
- )
-
-
-def create_agent_pool_profiles_dict(agentpoolprofiles):
- '''
- Helper method to deserialize a ContainerServiceAgentPoolProfile to a dict
- :param: agentpoolprofiles: ContainerServiceAgentPoolProfile with the Azure callback object
- :return: dict with the state on Azure
- '''
- return [dict(
- count=profile.count,
- vm_size=profile.vm_size,
- name=profile.name,
- os_disk_size_gb=profile.os_disk_size_gb,
- storage_profile=profile.storage_profile,
- vnet_subnet_id=profile.vnet_subnet_id,
- os_type=profile.os_type
- ) for profile in agentpoolprofiles] if agentpoolprofiles else None
-
-
-def create_addon_profiles_spec():
- '''
- Helper method to parse the ADDONS dictionary and generate the addon spec
- '''
- spec = dict()
- for key in ADDONS.keys():
- values = ADDONS[key]
- addon_spec = dict(
- enabled=dict(type='bool', default=True)
- )
- configs = values.get('config') or {}
- for item in configs.keys():
- addon_spec[item] = dict(type='str', aliases=[configs[item]], required=True)
- spec[key] = dict(type='dict', options=addon_spec, aliases=[values['name']])
- return spec
-
-
-ADDONS = {
- 'http_application_routing': dict(name='httpApplicationRouting'),
- 'monitoring': dict(name='omsagent', config={'log_analytics_workspace_resource_id': 'logAnalyticsWorkspaceResourceID'}),
- 'virtual_node': dict(name='aciConnector', config={'subnet_resource_id': 'SubnetName'})
-}
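
The create_addon_profiles_spec() helper and the ADDONS table above derive the addon argument spec from a single mapping: every addon gets an enabled flag plus one string option per config entry, aliased to its Azure-side key. The standalone sketch below copies the table and mirrors the generator so the printed result shows what the derived spec looks like for the monitoring addon.

from pprint import pprint

ADDONS = {
    'http_application_routing': dict(name='httpApplicationRouting'),
    'monitoring': dict(name='omsagent',
                       config={'log_analytics_workspace_resource_id': 'logAnalyticsWorkspaceResourceID'}),
    'virtual_node': dict(name='aciConnector', config={'subnet_resource_id': 'SubnetName'})
}


def addon_profiles_spec(addons):
    spec = dict()
    for key, values in addons.items():
        addon_spec = dict(enabled=dict(type='bool', default=True))
        for option, alias in (values.get('config') or {}).items():
            addon_spec[option] = dict(type='str', aliases=[alias], required=True)
        spec[key] = dict(type='dict', options=addon_spec, aliases=[values['name']])
    return spec


if __name__ == '__main__':
    # 'monitoring' becomes a dict suboption aliased to 'omsagent' whose
    # log_analytics_workspace_resource_id option is aliased to the Azure-side
    # config key 'logAnalyticsWorkspaceResourceID'.
    pprint(addon_profiles_spec(ADDONS)['monitoring'])
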
-
-
-linux_profile_spec = dict(
- admin_username=dict(type='str', required=True),
- ssh_key=dict(type='str', required=True)
-)
-
-
-service_principal_spec = dict(
- client_id=dict(type='str', required=True),
- client_secret=dict(type='str', no_log=True)
-)
-
-
-agent_pool_profile_spec = dict(
- name=dict(type='str', required=True),
- count=dict(type='int', required=True),
- vm_size=dict(type='str', required=True),
- os_disk_size_gb=dict(type='int'),
- dns_prefix=dict(type='str'),
- ports=dict(type='list', elements='int'),
- storage_profiles=dict(type='str', choices=[
- 'StorageAccount', 'ManagedDisks']),
- vnet_subnet_id=dict(type='str'),
- os_type=dict(type='str', choices=['Linux', 'Windows'])
-)
-
-
-network_profile_spec = dict(
- network_plugin=dict(type='str', choices=['azure', 'kubenet']),
- network_policy=dict(type='str'),
- pod_cidr=dict(type='str'),
- service_cidr=dict(type='str'),
- dns_service_ip=dict(type='str'),
- docker_bridge_cidr=dict(type='str')
-)
-
-
-aad_profile_spec = dict(
- client_app_id=dict(type='str'),
- server_app_id=dict(type='str'),
- server_app_secret=dict(type='str', no_log=True),
- tenant_id=dict(type='str')
-)
-
-
-class AzureRMManagedCluster(AzureRMModuleBase):
- """Configuration class for an Azure RM container service (AKS) resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- ),
- location=dict(
- type='str'
- ),
- dns_prefix=dict(
- type='str'
- ),
- kubernetes_version=dict(
- type='str'
- ),
- linux_profile=dict(
- type='dict',
- options=linux_profile_spec
- ),
- agent_pool_profiles=dict(
- type='list',
- elements='dict',
- options=agent_pool_profile_spec
- ),
- service_principal=dict(
- type='dict',
- options=service_principal_spec
- ),
- enable_rbac=dict(
- type='bool',
- default=False
- ),
- network_profile=dict(
- type='dict',
- options=network_profile_spec
- ),
- aad_profile=dict(
- type='dict',
- options=aad_profile_spec
- ),
- addon=dict(
- type='dict',
- options=create_addon_profiles_spec()
- )
- )
-
- self.resource_group = None
- self.name = None
- self.location = None
- self.dns_prefix = None
- self.kubernetes_version = None
- self.tags = None
- self.state = None
- self.linux_profile = None
- self.agent_pool_profiles = None
- self.service_principal = None
- self.enable_rbac = False
- self.network_profile = None
- self.aad_profile = None
- self.addon = None
-
- required_if = [
- ('state', 'present', [
- 'dns_prefix', 'linux_profile', 'agent_pool_profiles', 'service_principal'])
- ]
-
- self.results = dict(changed=False)
-
- super(AzureRMManagedCluster, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True,
- required_if=required_if)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- resource_group = None
- to_be_updated = False
- update_tags = False
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- response = self.get_aks()
-
-        # Check if the AKS instance is already present in the RG
- if self.state == 'present':
-            # For now Agent Pool cannot be more than 1, just remove this part in the future if it changes
- agentpoolcount = len(self.agent_pool_profiles)
- if agentpoolcount > 1:
-                self.fail('You cannot currently specify more than one entry in agent_pool_profiles')
-
- available_versions = self.get_all_versions()
- if not response:
- to_be_updated = True
- if self.kubernetes_version not in available_versions.keys():
- self.fail("Unsupported kubernetes version. Expected one of {0} but got {1}".format(available_versions.keys(), self.kubernetes_version))
- else:
- self.results = response
- self.results['changed'] = False
- self.log('Results : {0}'.format(response))
- update_tags, response['tags'] = self.update_tags(response['tags'])
-
- if response['provisioning_state'] == "Succeeded":
-
- def is_property_changed(profile, property, ignore_case=False):
- base = response[profile].get(property)
- new = getattr(self, profile).get(property)
- if ignore_case:
- return base.lower() != new.lower()
- else:
- return base != new
-
-                    # Cannot Update the SSH Key for now // Let the service handle it
- if is_property_changed('linux_profile', 'ssh_key'):
- self.log(("Linux Profile Diff SSH, Was {0} / Now {1}"
- .format(response['linux_profile']['ssh_key'], self.linux_profile.get('ssh_key'))))
- to_be_updated = True
- # self.module.warn("linux_profile.ssh_key cannot be updated")
-
- # self.log("linux_profile response : {0}".format(response['linux_profile'].get('admin_username')))
- # self.log("linux_profile self : {0}".format(self.linux_profile[0].get('admin_username')))
-                    # Cannot Update the Username for now // Let the service handle it
- if is_property_changed('linux_profile', 'admin_username'):
- self.log(("Linux Profile Diff User, Was {0} / Now {1}"
- .format(response['linux_profile']['admin_username'], self.linux_profile.get('admin_username'))))
- to_be_updated = True
- # self.module.warn("linux_profile.admin_username cannot be updated")
-
-                    # Cannot have more than one agent pool profile for now
- if len(response['agent_pool_profiles']) != len(self.agent_pool_profiles):
-                        self.log("Agent pool count differs, needs to be updated")
- to_be_updated = True
-
- if response['kubernetes_version'] != self.kubernetes_version:
- upgrade_versions = available_versions.get(response['kubernetes_version']) or available_versions.keys()
- if upgrade_versions and self.kubernetes_version not in upgrade_versions:
-                            self.fail('Cannot upgrade kubernetes version to {0}, supported values are {1}'.format(self.kubernetes_version, upgrade_versions))
- to_be_updated = True
-
- if response['enable_rbac'] != self.enable_rbac:
- to_be_updated = True
-
- if self.network_profile:
- for key in self.network_profile.keys():
- original = response['network_profile'].get(key) or ''
- if self.network_profile[key] and self.network_profile[key].lower() != original.lower():
- to_be_updated = True
-
- def compare_addon(origin, patch, config):
- if not patch:
- return True
- if not origin:
- return False
- if origin['enabled'] != patch['enabled']:
- return False
- config = config or dict()
- for key in config.keys():
- if origin.get(config[key]) != patch.get(key):
- return False
- return True
-
- if self.addon:
- for key in ADDONS.keys():
- addon_name = ADDONS[key]['name']
- if not compare_addon(response['addon'].get(addon_name), self.addon.get(key), ADDONS[key].get('config')):
- to_be_updated = True
-
- for profile_result in response['agent_pool_profiles']:
- matched = False
- for profile_self in self.agent_pool_profiles:
- if profile_result['name'] == profile_self['name']:
- matched = True
- os_disk_size_gb = profile_self.get('os_disk_size_gb') or profile_result['os_disk_size_gb']
- if profile_result['count'] != profile_self['count'] \
- or profile_result['vm_size'] != profile_self['vm_size'] \
- or profile_result['os_disk_size_gb'] != os_disk_size_gb \
- or profile_result['vnet_subnet_id'] != profile_self.get('vnet_subnet_id', profile_result['vnet_subnet_id']):
- self.log(("Agent Profile Diff - Origin {0} / Update {1}".format(str(profile_result), str(profile_self))))
- to_be_updated = True
- if not matched:
- self.log("Agent Pool not found")
- to_be_updated = True
-
- if to_be_updated:
- self.log("Need to Create / Update the AKS instance")
-
- if not self.check_mode:
- self.results = self.create_update_aks()
- self.log("Creation / Update done")
-
- self.results['changed'] = True
- elif update_tags:
- self.log("Need to Update the AKS tags")
-
- if not self.check_mode:
- self.results['tags'] = self.update_aks_tags()
- self.results['changed'] = True
- return self.results
-
- elif self.state == 'absent' and response:
- self.log("Need to Delete the AKS instance")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_aks()
-
- self.log("AKS instance deleted")
-
- return self.results
-
- def create_update_aks(self):
- '''
- Creates or updates a managed Azure container service (AKS) with the specified configuration of agents.
-
- :return: deserialized AKS instance state dictionary
- '''
- self.log("Creating / Updating the AKS instance {0}".format(self.name))
-
- agentpools = []
-
- if self.agent_pool_profiles:
- agentpools = [self.create_agent_pool_profile_instance(profile) for profile in self.agent_pool_profiles]
-
- service_principal_profile = self.create_service_principal_profile_instance(self.service_principal)
-
- parameters = self.managedcluster_models.ManagedCluster(
- location=self.location,
- dns_prefix=self.dns_prefix,
- kubernetes_version=self.kubernetes_version,
- tags=self.tags,
- service_principal_profile=service_principal_profile,
- agent_pool_profiles=agentpools,
- linux_profile=self.create_linux_profile_instance(self.linux_profile),
- enable_rbac=self.enable_rbac,
- network_profile=self.create_network_profile_instance(self.network_profile),
- aad_profile=self.create_aad_profile_instance(self.aad_profile),
- addon_profiles=self.create_addon_profile_instance(self.addon)
- )
-
- # self.log("service_principal_profile : {0}".format(parameters.service_principal_profile))
- # self.log("linux_profile : {0}".format(parameters.linux_profile))
- # self.log("ssh from yaml : {0}".format(results.get('linux_profile')[0]))
- # self.log("ssh : {0}".format(parameters.linux_profile.ssh))
- # self.log("agent_pool_profiles : {0}".format(parameters.agent_pool_profiles))
-
- try:
- poller = self.managedcluster_client.managed_clusters.create_or_update(self.resource_group, self.name, parameters)
- response = self.get_poller_result(poller)
- response.kube_config = self.get_aks_kubeconfig()
- return create_aks_dict(response)
- except CloudError as exc:
- self.log('Error attempting to create the AKS instance.')
- self.fail("Error creating the AKS instance: {0}".format(exc.message))
-
- def update_aks_tags(self):
- try:
- poller = self.managedcluster_client.managed_clusters.update_tags(self.resource_group, self.name, self.tags)
- response = self.get_poller_result(poller)
- return response.tags
- except CloudError as exc:
- self.fail("Error attempting to update AKS tags: {0}".format(exc.message))
-
- def delete_aks(self):
- '''
- Deletes the specified managed container service (AKS) in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the AKS instance {0}".format(self.name))
- try:
- poller = self.managedcluster_client.managed_clusters.delete(self.resource_group, self.name)
- self.get_poller_result(poller)
- return True
- except CloudError as e:
- self.log('Error attempting to delete the AKS instance.')
- self.fail("Error deleting the AKS instance: {0}".format(e.message))
- return False
-
- def get_aks(self):
- '''
- Gets the properties of the specified container service.
-
- :return: deserialized AKS instance state dictionary
- '''
- self.log("Checking if the AKS instance {0} is present".format(self.name))
- try:
- response = self.managedcluster_client.managed_clusters.get(self.resource_group, self.name)
- self.log("Response : {0}".format(response))
- self.log("AKS instance : {0} found".format(response.name))
- response.kube_config = self.get_aks_kubeconfig()
- return create_aks_dict(response)
- except CloudError:
- self.log('Did not find the AKS instance.')
- return False
-
- def get_all_versions(self):
- try:
- result = dict()
- response = self.containerservice_client.container_services.list_orchestrators(self.location, resource_type='managedClusters')
- orchestrators = response.orchestrators
- for item in orchestrators:
- result[item.orchestrator_version] = [x.orchestrator_version for x in item.upgrades] if item.upgrades else []
- return result
- except Exception as exc:
- self.fail('Error when getting AKS supported kubernetes version list for location {0} - {1}'.format(self.location, exc.message or str(exc)))
-
- def get_aks_kubeconfig(self):
- '''
- Gets kubeconfig for the specified AKS instance.
-
- :return: AKS instance kubeconfig
- '''
- access_profile = self.managedcluster_client.managed_clusters.get_access_profile(resource_group_name=self.resource_group,
- resource_name=self.name,
- role_name="clusterUser")
- return access_profile.kube_config.decode('utf-8')
-
- def create_agent_pool_profile_instance(self, agentpoolprofile):
- '''
- Helper method to serialize a dict to a ManagedClusterAgentPoolProfile
- :param: agentpoolprofile: dict with the parameters to setup the ManagedClusterAgentPoolProfile
- :return: ManagedClusterAgentPoolProfile
- '''
- return self.managedcluster_models.ManagedClusterAgentPoolProfile(**agentpoolprofile)
-
- def create_service_principal_profile_instance(self, spnprofile):
- '''
- Helper method to serialize a dict to a ManagedClusterServicePrincipalProfile
- :param: spnprofile: dict with the parameters to setup the ManagedClusterServicePrincipalProfile
- :return: ManagedClusterServicePrincipalProfile
- '''
- return self.managedcluster_models.ManagedClusterServicePrincipalProfile(
- client_id=spnprofile['client_id'],
- secret=spnprofile['client_secret']
- )
-
- def create_linux_profile_instance(self, linuxprofile):
- '''
- Helper method to serialize a dict to a ContainerServiceLinuxProfile
- :param: linuxprofile: dict with the parameters to setup the ContainerServiceLinuxProfile
- :return: ContainerServiceLinuxProfile
- '''
- return self.managedcluster_models.ContainerServiceLinuxProfile(
- admin_username=linuxprofile['admin_username'],
- ssh=self.managedcluster_models.ContainerServiceSshConfiguration(public_keys=[
- self.managedcluster_models.ContainerServiceSshPublicKey(key_data=str(linuxprofile['ssh_key']))])
- )
-
- def create_network_profile_instance(self, network):
- return self.managedcluster_models.ContainerServiceNetworkProfile(**network) if network else None
-
- def create_aad_profile_instance(self, aad):
- return self.managedcluster_models.ManagedClusterAADProfile(**aad) if aad else None
-
- def create_addon_profile_instance(self, addon):
- result = dict()
- addon = addon or {}
- for key in addon.keys():
- if not ADDONS.get(key):
- self.fail('Unsupported addon {0}'.format(key))
- if addon.get(key):
- name = ADDONS[key]['name']
- config_spec = ADDONS[key].get('config') or dict()
- config = addon[key]
- for v in config_spec.keys():
- config[config_spec[v]] = config[v]
- result[name] = self.managedcluster_models.ManagedClusterAddonProfile(config=config, enabled=config['enabled'])
- return result
-
-
-def main():
- """Main execution"""
- AzureRMManagedCluster()
-
-
-if __name__ == '__main__':
- main()
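
One detail of exec_module above worth calling out: a kubernetes_version change is only accepted when the requested version is either the current one or among the upgrade targets that get_all_versions() reports for it. The sketch below reproduces that check in isolation; the version numbers are made up for illustration and say nothing about what AKS actually supports.

def validate_version_change(current, requested, available_versions):
    """Return None when the change is allowed, otherwise an error message."""
    if requested == current:
        return None
    upgrade_targets = available_versions.get(current) or list(available_versions.keys())
    if requested not in upgrade_targets:
        return ('Cannot upgrade kubernetes version to {0}, supported values are {1}'
                .format(requested, upgrade_targets))
    return None


if __name__ == '__main__':
    # Illustrative mapping in the shape returned by get_all_versions():
    # current version -> list of versions it can be upgraded to.
    versions = {
        '1.13.12': ['1.14.6', '1.14.7'],
        '1.14.6': ['1.14.7'],
        '1.14.7': [],
    }
    print(validate_version_change('1.13.12', '1.14.6', versions))  # None: allowed upgrade
    print(validate_version_change('1.13.12', '1.14.8', versions))  # error message
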
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_aks_info.py b/lib/ansible/modules/cloud/azure/azure_rm_aks_info.py
deleted file mode 100644
index 3eb0ef1230..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_aks_info.py
+++ /dev/null
@@ -1,191 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_aks_info
-
-version_added: "2.9"
-
-short_description: Get Azure Kubernetes Service facts
-
-description:
- - Get facts for a specific Azure Kubernetes Service or all Azure Kubernetes Services.
-
-options:
- name:
- description:
-            - Limit results to a specific Azure Kubernetes Service (AKS) instance.
- resource_group:
- description:
-            - The resource group to search for the desired Azure Kubernetes Service.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- show_kubeconfig:
- description:
- - Show kubeconfig of the AKS cluster.
-            - Note that this operation adds extra network overhead; it is not recommended when listing all AKS clusters.
- version_added: "2.8"
- choices:
- - user
- - admin
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yuwei Zhou (@yuwzho)
-'''
-
-EXAMPLES = '''
- - name: Get facts for one Azure Kubernetes Service
- azure_rm_aks_info:
- name: Testing
- resource_group: myResourceGroup
-
- - name: Get facts for all Azure Kubernetes Services
- azure_rm_aks_info:
-
- - name: Get facts by tags
- azure_rm_aks_info:
- tags:
- - testing
-'''
-
-RETURN = '''
-azure_aks:
- description: List of Azure Kubernetes Service dicts.
- returned: always
- type: list
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.common import AzureHttpError
-except Exception:
- # handled in azure_rm_common
- pass
-
-AZURE_OBJECT_CLASS = 'managedClusters'
-
-
-class AzureRMManagedClusterInfo(AzureRMModuleBase):
- """Utility class to get Azure Kubernetes Service facts"""
-
- def __init__(self):
-
- self.module_args = dict(
- name=dict(type='str'),
- resource_group=dict(type='str'),
- tags=dict(type='list'),
- show_kubeconfig=dict(type='str', choices=['user', 'admin']),
- )
-
- self.results = dict(
- changed=False,
- aks=[],
- available_versions=[]
- )
-
- self.name = None
- self.resource_group = None
- self.tags = None
- self.show_kubeconfig = None
-
- super(AzureRMManagedClusterInfo, self).__init__(
- derived_arg_spec=self.module_args,
- supports_tags=False,
- facts_module=True
- )
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_aks_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_aks_facts' module has been renamed to 'azure_rm_aks_info'", version='2.13')
-
- for key in self.module_args:
- setattr(self, key, kwargs[key])
-
- self.results['aks'] = (
- self.get_item() if self.name
- else self.list_items()
- )
-
- return self.results
-
- def get_item(self):
- """Get a single Azure Kubernetes Service"""
-
- self.log('Get properties for {0}'.format(self.name))
-
- item = None
- result = []
-
- try:
- item = self.managedcluster_client.managed_clusters.get(self.resource_group, self.name)
- except CloudError:
- pass
-
- if item and self.has_tags(item.tags, self.tags):
- result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
- if self.show_kubeconfig:
- result[0]['kube_config'] = self.get_aks_kubeconfig(self.resource_group, self.name)
-
- return result
-
- def list_items(self):
- """Get all Azure Kubernetes Services"""
-
- self.log('List all Azure Kubernetes Services')
-
- try:
- response = self.managedcluster_client.managed_clusters.list(self.resource_group)
- except AzureHttpError as exc:
- self.fail('Failed to list all items - {0}'.format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- item_dict = self.serialize_obj(item, AZURE_OBJECT_CLASS)
- if self.show_kubeconfig:
- item_dict['kube_config'] = self.get_aks_kubeconfig(self.resource_group, item.name)
- results.append(item_dict)
-
- return results
-
- def get_aks_kubeconfig(self, resource_group, name):
- '''
- Gets kubeconfig for the specified AKS instance.
-
- :return: AKS instance kubeconfig
- '''
- if not self.show_kubeconfig:
- return ''
- role_name = 'cluster{0}'.format(str.capitalize(self.show_kubeconfig))
- access_profile = self.managedcluster_client.managed_clusters.get_access_profile(resource_group, name, role_name)
- return access_profile.kube_config.decode('utf-8')
-
-
-def main():
- """Main module execution code path"""
-
- AzureRMManagedClusterInfo()
-
-
-if __name__ == '__main__':
- main()
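
The tags option above filters results using entries of the form 'key' or 'key:value', which the module delegates to AzureRMModuleBase.has_tags. The helper below is a minimal standalone illustration of that documented behaviour, not the has_tags implementation itself.

def matches_tag_filters(resource_tags, tag_filters):
    """Return True when every requested 'key' or 'key:value' filter is satisfied."""
    resource_tags = resource_tags or {}
    for entry in tag_filters or []:
        key, _, value = entry.partition(':')
        if key not in resource_tags:
            return False
        if value and resource_tags[key] != value:
            return False
    return True


if __name__ == '__main__':
    tags = {'Environment': 'Production', 'testing': ''}
    print(matches_tag_filters(tags, ['testing']))                 # True: key present
    print(matches_tag_filters(tags, ['Environment:Production']))  # True: key and value match
    print(matches_tag_filters(tags, ['Environment:Staging']))     # False: value differs
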
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_aksversion_info.py b/lib/ansible/modules/cloud/azure/azure_rm_aksversion_info.py
deleted file mode 100644
index 462ee78552..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_aksversion_info.py
+++ /dev/null
@@ -1,133 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_aksversion_info
-
-version_added: "2.9"
-
-short_description: Get available kubernetes versions supported by Azure Kubernetes Service
-
-description:
- - Get available kubernetes versions supported by Azure Kubernetes Service.
-
-options:
- location:
- description:
-            - The location for which to get the versions available for creating a managed Kubernetes cluster.
- required: true
- version:
- description:
-            - When specified, get the upgrade versions available for this managed Kubernetes cluster version.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yuwei Zhou (@yuwzho)
-'''
-
-EXAMPLES = '''
- - name: Get available versions for AKS in location eastus
- azure_rm_aksversion_info:
- location: eastus
-  - name: Get available versions an AKS cluster can be upgraded to
- azure_rm_aksversion_info:
-      location: eastus
- version: 1.11.6
-'''
-
-RETURN = '''
-azure_aks_versions:
- description: List of supported kubernetes versions.
- returned: always
- type: list
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.common import AzureHttpError
-except Exception:
- # handled in azure_rm_common
- pass
-
-
-class AzureRMAKSVersion(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_args = dict(
- location=dict(type='str', required=True),
- version=dict(type='str')
- )
-
- self.results = dict(
- changed=False,
- azure_aks_versions=[]
- )
-
- self.location = None
- self.version = None
-
- super(AzureRMAKSVersion, self).__init__(
- derived_arg_spec=self.module_args,
- supports_tags=False,
- facts_module=True
- )
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_aksversion_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_aksversion_facts' module has been renamed to 'azure_rm_aksversion_info'", version='2.13')
-
- for key in self.module_args:
- setattr(self, key, kwargs[key])
-
- self.results['azure_aks_versions'] = self.get_all_versions(self.location, self.version)
-
- return self.results
-
- def get_all_versions(self, location, version):
- '''
- Get all kubernetes version supported by AKS
- :return: ordered version list
- '''
- try:
- result = dict()
- response = self.containerservice_client.container_services.list_orchestrators(self.location, resource_type='managedClusters')
- orchestrators = response.orchestrators
- for item in orchestrators:
- result[item.orchestrator_version] = [x.orchestrator_version for x in item.upgrades] if item.upgrades else []
- if version:
- return result.get(version) or []
- else:
- keys = list(result.keys())
- keys.sort()
- return keys
- except Exception as exc:
- self.fail('Error when getting AKS supported kubernetes version list for location {0} - {1}'.format(self.location, exc.message or str(exc)))
-
-
-def main():
- """Main module execution code path"""
-
- AzureRMAKSVersion()
-
-
-if __name__ == '__main__':
- main()
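
A note on get_all_versions() above: it produces its ordered version list with a plain string sort, which would place a hypothetical '1.10.x' before '1.9.x'. The sketch below shows a numeric-aware sort key that avoids this; it is an editorial suggestion, not part of the original module.

def version_sort_key(version):
    """Split '1.14.6' into (1, 14, 6) so comparison is numeric per component."""
    return tuple(int(part) for part in version.split('.'))


if __name__ == '__main__':
    versions = ['1.9.11', '1.14.6', '1.10.7', '1.13.12']
    print(sorted(versions))                        # string sort: ['1.10.7', '1.13.12', '1.14.6', '1.9.11']
    print(sorted(versions, key=version_sort_key))  # numeric sort: ['1.9.11', '1.10.7', '1.13.12', '1.14.6']
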
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_appgateway.py b/lib/ansible/modules/cloud/azure/azure_rm_appgateway.py
deleted file mode 100644
index 3aaada3c51..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_appgateway.py
+++ /dev/null
@@ -1,1009 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_appgateway
-version_added: "2.7"
-short_description: Manage Application Gateway instance
-description:
- - Create, update and delete instance of Application Gateway.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- name:
- description:
- - The name of the application gateway.
- required: True
- location:
- description:
- - Resource location. If not set, location from the resource group will be used as default.
- sku:
- description:
- - SKU of the application gateway resource.
- suboptions:
- name:
- description:
- - Name of an application gateway SKU.
- choices:
- - 'standard_small'
- - 'standard_medium'
- - 'standard_large'
- - 'waf_medium'
- - 'waf_large'
- tier:
- description:
- - Tier of an application gateway.
- choices:
- - 'standard'
- - 'waf'
- capacity:
- description:
- - Capacity (instance count) of an application gateway.
- ssl_policy:
- description:
- - SSL policy of the application gateway resource.
- suboptions:
- disabled_ssl_protocols:
- description:
- - List of SSL protocols to be disabled on application gateway.
- choices:
- - 'tls_v1_0'
- - 'tls_v1_1'
- - 'tls_v1_2'
- policy_type:
- description:
- - Type of SSL Policy.
- choices:
- - 'predefined'
- - 'custom'
- policy_name:
- description:
- - Name of Ssl C(predefined) policy.
- choices:
- - 'ssl_policy20150501'
- - 'ssl_policy20170401'
- - 'ssl_policy20170401_s'
- cipher_suites:
- description:
- - List of SSL cipher suites to be enabled in the specified order to application gateway.
- choices:
- - tls_ecdhe_rsa_with_aes_256_gcm_sha384
- - tls_ecdhe_rsa_with_aes_128_gcm_sha256
- - tls_ecdhe_rsa_with_aes_256_cbc_sha384
- - tls_ecdhe_rsa_with_aes_128_cbc_sha256
- - tls_ecdhe_rsa_with_aes_256_cbc_sha
- - tls_ecdhe_rsa_with_aes_128_cbc_sha
- - tls_dhe_rsa_with_aes_256_gcm_sha384
- - tls_dhe_rsa_with_aes_128_gcm_sha256
- - tls_dhe_rsa_with_aes_256_cbc_sha
- - tls_dhe_rsa_with_aes_128_cbc_sha
- - tls_rsa_with_aes_256_gcm_sha384
- - tls_rsa_with_aes_128_gcm_sha256
- - tls_rsa_with_aes_256_cbc_sha256
- - tls_rsa_with_aes_128_cbc_sha256
- - tls_rsa_with_aes_256_cbc_sha
- - tls_rsa_with_aes_128_cbc_sha
- - tls_ecdhe_ecdsa_with_aes_256_gcm_sha384
- - tls_ecdhe_ecdsa_with_aes_128_gcm_sha256
- - tls_ecdhe_ecdsa_with_aes_256_cbc_sha384
- - tls_ecdhe_ecdsa_with_aes_128_cbc_sha256
- - tls_ecdhe_ecdsa_with_aes_256_cbc_sha
- - tls_ecdhe_ecdsa_with_aes_128_cbc_sha
- - tls_dhe_dss_with_aes_256_cbc_sha256
- - tls_dhe_dss_with_aes_128_cbc_sha256
- - tls_dhe_dss_with_aes_256_cbc_sha
- - tls_dhe_dss_with_aes_128_cbc_sha
- - tls_rsa_with_3des_ede_cbc_sha
- - tls_dhe_dss_with_3des_ede_cbc_sha
- min_protocol_version:
- description:
- - Minimum version of Ssl protocol to be supported on application gateway.
- choices:
- - 'tls_v1_0'
- - 'tls_v1_1'
- - 'tls_v1_2'
- gateway_ip_configurations:
- description:
- - List of subnets used by the application gateway.
- suboptions:
- subnet:
- description:
- - Reference of the subnet resource. A subnet from where application gateway gets its private address.
- name:
- description:
- - Name of the resource that is unique within a resource group. This name can be used to access the resource.
- authentication_certificates:
- description:
- - Authentication certificates of the application gateway resource.
- suboptions:
- data:
- description:
- - Certificate public data - base64 encoded pfx.
- name:
- description:
- - Name of the resource that is unique within a resource group. This name can be used to access the resource.
- redirect_configurations:
- version_added: "2.8"
- description:
- - Redirect configurations of the application gateway resource.
- suboptions:
- redirect_type:
- description:
- - Redirection type.
- choices:
- - 'permanent'
- - 'found'
- - 'see_other'
- - 'temporary'
- target_listener:
- description:
- - Reference to a listener to redirect the request to.
- include_path:
- description:
- - Include path in the redirected url.
- include_query_string:
- description:
- - Include query string in the redirected url.
- name:
- description:
- - Name of the resource that is unique within a resource group.
- ssl_certificates:
- description:
- - SSL certificates of the application gateway resource.
- suboptions:
- data:
- description:
- - Base-64 encoded pfx certificate.
- - Only applicable in PUT Request.
- password:
- description:
- - Password for the pfx file specified in I(data).
- - Only applicable in PUT request.
- name:
- description:
- - Name of the resource that is unique within a resource group. This name can be used to access the resource.
- frontend_ip_configurations:
- description:
- - Frontend IP addresses of the application gateway resource.
- suboptions:
- private_ip_address:
- description:
- - PrivateIPAddress of the network interface IP Configuration.
- private_ip_allocation_method:
- description:
- - PrivateIP allocation method.
- choices:
- - 'static'
- - 'dynamic'
- subnet:
- description:
- - Reference of the subnet resource.
- public_ip_address:
- description:
- - Reference of the PublicIP resource.
- name:
- description:
- - Name of the resource that is unique within a resource group. This name can be used to access the resource.
- frontend_ports:
- description:
- - List of frontend ports of the application gateway resource.
- suboptions:
- port:
- description:
- - Frontend port.
- name:
- description:
- - Name of the resource that is unique within a resource group. This name can be used to access the resource.
- backend_address_pools:
- description:
- - List of backend address pool of the application gateway resource.
- suboptions:
- backend_addresses:
- description:
- - List of backend addresses.
- suboptions:
- fqdn:
- description:
- - Fully qualified domain name (FQDN).
- ip_address:
- description:
- - IP address.
- name:
- description:
- - Resource that is unique within a resource group. This name can be used to access the resource.
- probes:
- version_added: "2.8"
- description:
- - Probes available to the application gateway resource.
- suboptions:
- name:
- description:
- - Name of the I(probe) that is unique within an Application Gateway.
- protocol:
- description:
- - The protocol used for the I(probe).
- choices:
- - 'http'
- - 'https'
- host:
- description:
- - Host name to send the I(probe) to.
- path:
- description:
- - Relative path of I(probe).
- - Valid path starts from '/'.
- - Probe is sent to <Protocol>://<host>:<port><path>.
- timeout:
- description:
- - The probe timeout in seconds.
-                    - Probe marked as failed if valid response is not received within this timeout period.
- - Acceptable values are from 1 second to 86400 seconds.
- interval:
- description:
- - The probing interval in seconds.
- - This is the time interval between two consecutive probes.
- - Acceptable values are from 1 second to 86400 seconds.
- unhealthy_threshold:
- description:
- - The I(probe) retry count.
- - Backend server is marked down after consecutive probe failure count reaches UnhealthyThreshold.
-                    - Acceptable values are from 1 to 20.
- backend_http_settings_collection:
- description:
- - Backend http settings of the application gateway resource.
- suboptions:
- probe:
- description:
- - Probe resource of an application gateway.
- port:
- description:
- - The destination port on the backend.
- protocol:
- description:
- - The protocol used to communicate with the backend.
- choices:
- - 'http'
- - 'https'
- cookie_based_affinity:
- description:
- - Cookie based affinity.
- choices:
- - 'enabled'
- - 'disabled'
- request_timeout:
- description:
- - Request timeout in seconds.
- - Application Gateway will fail the request if response is not received within RequestTimeout.
- - Acceptable values are from 1 second to 86400 seconds.
- authentication_certificates:
- description:
- - List of references to application gateway authentication certificates.
- - Applicable only when C(cookie_based_affinity) is enabled, otherwise quietly ignored.
- suboptions:
- id:
- description:
- - Resource ID.
- host_name:
- description:
- - Host header to be sent to the backend servers.
- pick_host_name_from_backend_address:
- description:
-                    - Whether the host header should be picked from the host name of the backend server. Default value is false.
- affinity_cookie_name:
- description:
- - Cookie name to use for the affinity cookie.
- path:
- description:
- - Path which should be used as a prefix for all C(http) requests.
- - Null means no path will be prefixed. Default value is null.
- name:
- description:
- - Name of the resource that is unique within a resource group. This name can be used to access the resource.
- http_listeners:
- description:
- - List of HTTP listeners of the application gateway resource.
- suboptions:
- frontend_ip_configuration:
- description:
- - Frontend IP configuration resource of an application gateway.
- frontend_port:
- description:
- - Frontend port resource of an application gateway.
- protocol:
- description:
- - Protocol of the C(http) listener.
- choices:
- - 'http'
- - 'https'
- host_name:
- description:
- - Host name of C(http) listener.
- ssl_certificate:
- description:
- - SSL certificate resource of an application gateway.
- require_server_name_indication:
- description:
- - Applicable only if I(protocol) is C(https). Enables SNI for multi-hosting.
- name:
- description:
- - Name of the resource that is unique within a resource group. This name can be used to access the resource.
- request_routing_rules:
- description:
- - List of request routing rules of the application gateway resource.
- suboptions:
- rule_type:
- description:
- - Rule type.
- choices:
- - 'basic'
- - 'path_based_routing'
- backend_address_pool:
- description:
- - Backend address pool resource of the application gateway.
- backend_http_settings:
- description:
- - Backend C(http) settings resource of the application gateway.
- http_listener:
- description:
- - Http listener resource of the application gateway.
- name:
- description:
- - Name of the resource that is unique within a resource group. This name can be used to access the resource.
- redirect_configuration:
- description:
- - Redirect configuration resource of the application gateway.
- state:
- description:
-            - Assert the state of the application gateway. Use C(present) to create or update an application gateway and
-              C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
-- name: Create instance of Application Gateway
- azure_rm_appgateway:
- resource_group: myResourceGroup
- name: myAppGateway
- sku:
- name: standard_small
- tier: standard
- capacity: 2
- gateway_ip_configurations:
- - subnet:
- id: "{{ subnet_id }}"
- name: app_gateway_ip_config
- frontend_ip_configurations:
- - subnet:
- id: "{{ subnet_id }}"
- name: sample_gateway_frontend_ip_config
- frontend_ports:
- - port: 90
- name: ag_frontend_port
- backend_address_pools:
- - backend_addresses:
- - ip_address: 10.0.0.4
- name: test_backend_address_pool
- backend_http_settings_collection:
- - port: 80
- protocol: http
- cookie_based_affinity: enabled
- name: sample_appgateway_http_settings
- http_listeners:
- - frontend_ip_configuration: sample_gateway_frontend_ip_config
- frontend_port: ag_frontend_port
- name: sample_http_listener
- request_routing_rules:
- - rule_type: Basic
- backend_address_pool: test_backend_address_pool
- backend_http_settings: sample_appgateway_http_settings
- http_listener: sample_http_listener
- name: rule1
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: id
-'''
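
The documentation above accepts enum-like options in snake_case (for example rule_type C(path_based_routing) or protocol C(http)), while the Azure network resource model expects PascalCase values; exec_module below normalizes them with _snake_to_camel from ansible.module_utils.common.dict_transformations. The snippet here only illustrates the effect of that conversion with a simple stand-in and is not the library implementation.

def snake_to_pascal(value):
    """'path_based_routing' -> 'PathBasedRouting'; 'http' -> 'Http'."""
    return ''.join(part.capitalize() for part in value.split('_'))


if __name__ == '__main__':
    # Values taken from the option choices documented above.
    for raw in ('basic', 'path_based_routing', 'http', 'see_other'):
        print('{0} -> {1}'.format(raw, snake_to_pascal(raw)))
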
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from copy import deepcopy
-from ansible.module_utils.network.common.utils import dict_merge
-from ansible.module_utils.common.dict_transformations import (
- camel_dict_to_snake_dict, snake_dict_to_camel_dict,
- _camel_to_snake, _snake_to_camel,
-)
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from azure.mgmt.network import NetworkManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
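
The Actions values above serve as a small enum for the decision flow in exec_module below: whether the gateway exists, combined with the requested state, selects Create, Update, Delete or NoAction, and Update is later downgraded to NoAction when the property comparison finds no differences. A standalone sketch of that branching:

NoAction, Create, Update, Delete = range(4)


def choose_action(exists, state):
    """Mirror the existence/state branching used by the application gateway module."""
    if not exists:
        return Create if state == 'present' else NoAction
    return Delete if state == 'absent' else Update


if __name__ == '__main__':
    print(choose_action(False, 'present'))  # 1 -> Create
    print(choose_action(False, 'absent'))   # 0 -> NoAction
    print(choose_action(True, 'absent'))    # 3 -> Delete
    print(choose_action(True, 'present'))   # 2 -> Update (may become NoAction after diffing)
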
-
-
-ssl_policy_spec = dict(
- disabled_ssl_protocols=dict(type='list'),
- policy_type=dict(type='str', choices=['predefined', 'custom']),
- policy_name=dict(type='str', choices=['ssl_policy20150501', 'ssl_policy20170401', 'ssl_policy20170401_s']),
- cipher_suites=dict(type='list'),
- min_protocol_version=dict(type='str', choices=['tls_v1_0', 'tls_v1_1', 'tls_v1_2'])
-)
-
-
-probe_spec = dict(
- host=dict(type='str'),
- interval=dict(type='int'),
- name=dict(type='str'),
- path=dict(type='str'),
- protocol=dict(type='str', choices=['http', 'https']),
- timeout=dict(type='int'),
- unhealthy_threshold=dict(type='int')
-)
-
-
-redirect_configuration_spec = dict(
- include_path=dict(type='bool'),
- include_query_string=dict(type='bool'),
- name=dict(type='str'),
- redirect_type=dict(type='str', choices=['permanent', 'found', 'see_other', 'temporary']),
- target_listener=dict(type='str')
-)
-
-
-class AzureRMApplicationGateways(AzureRMModuleBase):
- """Configuration class for an Azure RM Application Gateway resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- sku=dict(
- type='dict'
- ),
- ssl_policy=dict(
- type='dict',
- options=ssl_policy_spec
- ),
- gateway_ip_configurations=dict(
- type='list'
- ),
- authentication_certificates=dict(
- type='list'
- ),
- ssl_certificates=dict(
- type='list'
- ),
- redirect_configurations=dict(
- type='list',
- elements='dict',
- options=redirect_configuration_spec
- ),
- frontend_ip_configurations=dict(
- type='list'
- ),
- frontend_ports=dict(
- type='list'
- ),
- backend_address_pools=dict(
- type='list'
- ),
- backend_http_settings_collection=dict(
- type='list'
- ),
- probes=dict(
- type='list',
- elements='dict',
- options=probe_spec
- ),
- http_listeners=dict(
- type='list'
- ),
- request_routing_rules=dict(
- type='list'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.name = None
- self.parameters = dict()
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMApplicationGateways, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- if key == "id":
- self.parameters["id"] = kwargs[key]
- elif key == "location":
- self.parameters["location"] = kwargs[key]
- elif key == "sku":
- ev = kwargs[key]
- if 'name' in ev:
- if ev['name'] == 'standard_small':
- ev['name'] = 'Standard_Small'
- elif ev['name'] == 'standard_medium':
- ev['name'] = 'Standard_Medium'
- elif ev['name'] == 'standard_large':
- ev['name'] = 'Standard_Large'
- elif ev['name'] == 'waf_medium':
- ev['name'] = 'WAF_Medium'
- elif ev['name'] == 'waf_large':
- ev['name'] = 'WAF_Large'
- if 'tier' in ev:
- if ev['tier'] == 'standard':
- ev['tier'] = 'Standard'
- elif ev['tier'] == 'waf':
- ev['tier'] = 'WAF'
- self.parameters["sku"] = ev
- elif key == "ssl_policy":
- ev = kwargs[key]
- if 'policy_type' in ev:
- ev['policy_type'] = _snake_to_camel(ev['policy_type'], True)
- if 'policy_name' in ev:
- if ev['policy_name'] == 'ssl_policy20150501':
- ev['policy_name'] = 'AppGwSslPolicy20150501'
- elif ev['policy_name'] == 'ssl_policy20170401':
- ev['policy_name'] = 'AppGwSslPolicy20170401'
- elif ev['policy_name'] == 'ssl_policy20170401_s':
- ev['policy_name'] = 'AppGwSslPolicy20170401S'
- if 'min_protocol_version' in ev:
- if ev['min_protocol_version'] == 'tls_v1_0':
- ev['min_protocol_version'] = 'TLSv1_0'
- elif ev['min_protocol_version'] == 'tls_v1_1':
- ev['min_protocol_version'] = 'TLSv1_1'
- elif ev['min_protocol_version'] == 'tls_v1_2':
- ev['min_protocol_version'] = 'TLSv1_2'
- if 'disabled_ssl_protocols' in ev:
- protocols = ev['disabled_ssl_protocols']
- if protocols is not None:
- for i in range(len(protocols)):
- if protocols[i] == 'tls_v1_0':
- protocols[i] = 'TLSv1_0'
- elif protocols[i] == 'tls_v1_1':
- protocols[i] = 'TLSv1_1'
- elif protocols[i] == 'tls_v1_2':
- protocols[i] = 'TLSv1_2'
- if 'cipher_suites' in ev:
- suites = ev['cipher_suites']
- if suites is not None:
- for i in range(len(suites)):
- suites[i] = suites[i].upper()
- elif key == "gateway_ip_configurations":
- self.parameters["gateway_ip_configurations"] = kwargs[key]
- elif key == "authentication_certificates":
- self.parameters["authentication_certificates"] = kwargs[key]
- elif key == "ssl_certificates":
- self.parameters["ssl_certificates"] = kwargs[key]
- elif key == "redirect_configurations":
- ev = kwargs[key]
- for i in range(len(ev)):
- item = ev[i]
- if 'redirect_type' in item:
- item['redirect_type'] = _snake_to_camel(item['redirect_type'], True)
- if 'target_listener' in item:
- id = http_listener_id(self.subscription_id,
- kwargs['resource_group'],
- kwargs['name'],
- item['target_listener'])
- item['target_listener'] = {'id': id}
- self.parameters["redirect_configurations"] = ev
- elif key == "frontend_ip_configurations":
- ev = kwargs[key]
- for i in range(len(ev)):
- item = ev[i]
- if 'private_ip_allocation_method' in item:
- item['private_ip_allocation_method'] = _snake_to_camel(item['private_ip_allocation_method'], True)
- if 'public_ip_address' in item:
- id = public_ip_id(self.subscription_id,
- kwargs['resource_group'],
- item['public_ip_address'])
- item['public_ip_address'] = {'id': id}
- self.parameters["frontend_ip_configurations"] = ev
- elif key == "frontend_ports":
- self.parameters["frontend_ports"] = kwargs[key]
- elif key == "backend_address_pools":
- self.parameters["backend_address_pools"] = kwargs[key]
- elif key == "probes":
- ev = kwargs[key]
- for i in range(len(ev)):
- item = ev[i]
- if 'protocol' in item:
- item['protocol'] = _snake_to_camel(item['protocol'], True)
- self.parameters["probes"] = ev
- elif key == "backend_http_settings_collection":
- ev = kwargs[key]
- for i in range(len(ev)):
- item = ev[i]
- if 'protocol' in item:
- item['protocol'] = _snake_to_camel(item['protocol'], True)
- if 'cookie_based_affinity' in item:
- item['cookie_based_affinity'] = _snake_to_camel(item['cookie_based_affinity'], True)
- if 'probe' in item:
- id = probe_id(self.subscription_id,
- kwargs['resource_group'],
- kwargs['name'],
- item['probe'])
- item['probe'] = {'id': id}
- self.parameters["backend_http_settings_collection"] = ev
- elif key == "http_listeners":
- ev = kwargs[key]
- for i in range(len(ev)):
- item = ev[i]
- if 'frontend_ip_configuration' in item:
- id = frontend_ip_configuration_id(self.subscription_id,
- kwargs['resource_group'],
- kwargs['name'],
- item['frontend_ip_configuration'])
- item['frontend_ip_configuration'] = {'id': id}
-
- if 'frontend_port' in item:
- id = frontend_port_id(self.subscription_id,
- kwargs['resource_group'],
- kwargs['name'],
- item['frontend_port'])
- item['frontend_port'] = {'id': id}
- if 'ssl_certificate' in item:
- id = ssl_certificate_id(self.subscription_id,
- kwargs['resource_group'],
- kwargs['name'],
- item['ssl_certificate'])
- item['ssl_certificate'] = {'id': id}
- if 'protocol' in item:
- item['protocol'] = _snake_to_camel(item['protocol'], True)
- ev[i] = item
- self.parameters["http_listeners"] = ev
- elif key == "request_routing_rules":
- ev = kwargs[key]
- for i in range(len(ev)):
- item = ev[i]
- if 'backend_address_pool' in item:
- id = backend_address_pool_id(self.subscription_id,
- kwargs['resource_group'],
- kwargs['name'],
- item['backend_address_pool'])
- item['backend_address_pool'] = {'id': id}
- if 'backend_http_settings' in item:
- id = backend_http_settings_id(self.subscription_id,
- kwargs['resource_group'],
- kwargs['name'],
- item['backend_http_settings'])
- item['backend_http_settings'] = {'id': id}
- if 'http_listener' in item:
- id = http_listener_id(self.subscription_id,
- kwargs['resource_group'],
- kwargs['name'],
- item['http_listener'])
- item['http_listener'] = {'id': id}
- if 'protocol' in item:
- item['protocol'] = _snake_to_camel(item['protocol'], True)
-                        if 'rule_type' in item:
- item['rule_type'] = _snake_to_camel(item['rule_type'], True)
- if 'redirect_configuration' in item:
- id = redirect_configuration_id(self.subscription_id,
- kwargs['resource_group'],
- kwargs['name'],
- item['redirect_configuration'])
- item['redirect_configuration'] = {'id': id}
- ev[i] = item
- self.parameters["request_routing_rules"] = ev
- elif key == "etag":
- self.parameters["etag"] = kwargs[key]
-
- old_response = None
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(NetworkManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- resource_group = self.get_resource_group(self.resource_group)
-
- if "location" not in self.parameters:
- self.parameters["location"] = resource_group.location
-
- old_response = self.get_applicationgateway()
-
- if not old_response:
- self.log("Application Gateway instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("Application Gateway instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.log("Need to check if Application Gateway instance has to be deleted or may be updated")
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Update):
- if (self.parameters['location'] != old_response['location'] or
- self.parameters['sku']['name'] != old_response['sku']['name'] or
- self.parameters['sku']['tier'] != old_response['sku']['tier'] or
- self.parameters['sku']['capacity'] != old_response['sku']['capacity'] or
- not compare_arrays(old_response, self.parameters, 'authentication_certificates') or
- not compare_arrays(old_response, self.parameters, 'gateway_ip_configurations') or
- not compare_arrays(old_response, self.parameters, 'redirect_configurations') or
- not compare_arrays(old_response, self.parameters, 'frontend_ip_configurations') or
- not compare_arrays(old_response, self.parameters, 'frontend_ports') or
- not compare_arrays(old_response, self.parameters, 'backend_address_pools') or
- not compare_arrays(old_response, self.parameters, 'probes') or
- not compare_arrays(old_response, self.parameters, 'backend_http_settings_collection') or
- not compare_arrays(old_response, self.parameters, 'request_routing_rules') or
- not compare_arrays(old_response, self.parameters, 'http_listeners')):
-
- self.to_do = Actions.Update
- else:
- self.to_do = Actions.NoAction
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the Application Gateway instance")
-
- if self.check_mode:
- self.results['changed'] = True
- self.results["parameters"] = self.parameters
- return self.results
-
- response = self.create_update_applicationgateway()
-
- if not old_response:
- self.results['changed'] = True
- else:
-                self.results['changed'] = old_response != response
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("Application Gateway instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_applicationgateway()
- # make sure instance is actually deleted, for some Azure resources, instance is hanging around
- # for some time after deletion -- this should be really fixed in Azure
- while self.get_applicationgateway():
- time.sleep(20)
- else:
- self.log("Application Gateway instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
-
- return self.results
-
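# Editor's sketch (not part of the deleted module): the delete branch above polls
# get_applicationgateway() every 20 seconds because Azure can keep reporting a
# deleted gateway for a while. A bounded variant of that wait, with a hypothetical
# fetch() callable standing in for the SDK lookup, might look like this:
import time


def wait_until_gone(fetch, interval=20, timeout=600):
    """Poll fetch() until it returns a falsy value or the timeout expires."""
    deadline = time.time() + timeout
    while fetch():
        if time.time() > deadline:
            raise TimeoutError('resource still present after {0} seconds'.format(timeout))
        time.sleep(interval)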
- def create_update_applicationgateway(self):
- '''
- Creates or updates Application Gateway with the specified configuration.
-
- :return: deserialized Application Gateway instance state dictionary
- '''
- self.log("Creating / Updating the Application Gateway instance {0}".format(self.name))
-
- try:
- response = self.mgmt_client.application_gateways.create_or_update(resource_group_name=self.resource_group,
- application_gateway_name=self.name,
- parameters=self.parameters)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the Application Gateway instance.')
- self.fail("Error creating the Application Gateway instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_applicationgateway(self):
- '''
- Deletes specified Application Gateway instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the Application Gateway instance {0}".format(self.name))
- try:
- response = self.mgmt_client.application_gateways.delete(resource_group_name=self.resource_group,
- application_gateway_name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the Application Gateway instance.')
- self.fail("Error deleting the Application Gateway instance: {0}".format(str(e)))
-
- return True
-
- def get_applicationgateway(self):
- '''
- Gets the properties of the specified Application Gateway.
-
- :return: deserialized Application Gateway instance state dictionary
- '''
- self.log("Checking if the Application Gateway instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mgmt_client.application_gateways.get(resource_group_name=self.resource_group,
- application_gateway_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("Application Gateway instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the Application Gateway instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def public_ip_id(subscription_id, resource_group_name, name):
- """Generate the id for a frontend ip configuration"""
- return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/publicIPAddresses/{2}'.format(
- subscription_id,
- resource_group_name,
- name
- )
-
-
-def frontend_ip_configuration_id(subscription_id, resource_group_name, appgateway_name, name):
- """Generate the id for a frontend ip configuration"""
- return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/frontendIPConfigurations/{3}'.format(
- subscription_id,
- resource_group_name,
- appgateway_name,
- name
- )
-
-
-def frontend_port_id(subscription_id, resource_group_name, appgateway_name, name):
- """Generate the id for a frontend port"""
- return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/frontendPorts/{3}'.format(
- subscription_id,
- resource_group_name,
- appgateway_name,
- name
- )
-
-
-def redirect_configuration_id(subscription_id, resource_group_name, appgateway_name, name):
- """Generate the id for a redirect configuration"""
- return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/redirectConfigurations/{3}'.format(
- subscription_id,
- resource_group_name,
- appgateway_name,
- name
- )
-
-
-def ssl_certificate_id(subscription_id, resource_group_name, ssl_certificate_name, name):
- """Generate the id for a frontend port"""
- return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/sslCertificates/{3}'.format(
- subscription_id,
- resource_group_name,
- ssl_certificate_name,
- name
- )
-
-
-def backend_address_pool_id(subscription_id, resource_group_name, appgateway_name, name):
- """Generate the id for an address pool"""
- return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/backendAddressPools/{3}'.format(
- subscription_id,
- resource_group_name,
- appgateway_name,
- name
- )
-
-
-def probe_id(subscription_id, resource_group_name, appgateway_name, name):
- """Generate the id for a probe"""
- return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/probes/{3}'.format(
- subscription_id,
- resource_group_name,
- appgateway_name,
- name
- )
-
-
-def backend_http_settings_id(subscription_id, resource_group_name, appgateway_name, name):
- """Generate the id for a http settings"""
- return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/backendHttpSettingsCollection/{3}'.format(
- subscription_id,
- resource_group_name,
- appgateway_name,
- name
- )
-
-
-def http_listener_id(subscription_id, resource_group_name, appgateway_name, name):
- """Generate the id for a http listener"""
- return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationGateways/{2}/httpListeners/{3}'.format(
- subscription_id,
- resource_group_name,
- appgateway_name,
- name
- )
-
-
-def compare_arrays(old_params, new_params, param_name):
- old = old_params.get(param_name) or []
- new = new_params.get(param_name) or []
-
- oldd = {}
- for item in old:
- name = item['name']
- oldd[name] = item
- newd = {}
- for item in new:
- name = item['name']
- newd[name] = item
-
- newd = dict_merge(oldd, newd)
- return newd == oldd
-
-
-def main():
- """Main execution"""
- AzureRMApplicationGateways()
-
-
-if __name__ == '__main__':
- main()
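Editorial note on the idempotency check above: compare_arrays keys both the deployed and the requested sub-resource lists by name, merges the requested entries over the deployed ones with Ansible's dict_merge, and treats the gateway as unchanged only when that merge is a no-op. A minimal standalone sketch of the same idea (simple_merge is an illustrative stand-in for dict_merge, not the module's helper):

def simple_merge(base, overlay):
    # Shallow stand-in for dict_merge: overlay wins, nested dicts merge recursively.
    merged = dict(base)
    for key, value in overlay.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = simple_merge(merged[key], value)
        else:
            merged[key] = value
    return merged


def arrays_equal(old_items, new_items):
    # Mirrors compare_arrays: name-keyed dicts, requested merged over deployed.
    oldd = {item['name']: item for item in old_items or []}
    newd = {item['name']: item for item in new_items or []}
    return simple_merge(oldd, newd) == oldd


deployed = [{'name': 'port_80', 'port': 80}]
print(arrays_equal(deployed, [{'name': 'port_80', 'port': 80}]))    # True, no update
print(arrays_equal(deployed, [{'name': 'port_443', 'port': 443}]))  # False, update needed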
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_applicationsecuritygroup.py b/lib/ansible/modules/cloud/azure/azure_rm_applicationsecuritygroup.py
deleted file mode 100644
index 5665e23a76..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_applicationsecuritygroup.py
+++ /dev/null
@@ -1,250 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_applicationsecuritygroup
-version_added: "2.8"
-short_description: Manage Azure Application Security Group
-description:
- - Create, update and delete instance of Azure Application Security Group.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- name:
- description:
- - The name of the application security group.
- required: True
- location:
- description:
- - Resource location. If not set, location from the resource group will be used as default.
- state:
- description:
- - Assert the state of the Application Security Group.
- - Use C(present) to create or update an Application Security Group and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yunge Zhu (@yungezz)
-
-'''
-
-EXAMPLES = '''
- - name: Create application security group
- azure_rm_applicationsecuritygroup:
- resource_group: myResourceGroup
- name: mySecurityGroup
- location: eastus
- tags:
- foo: bar
-'''
-
-RETURN = '''
-id:
- description:
- - Resource id of the application security group.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/applicationSecurityGroups/
- mySecurityGroup"
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrestazure.azure_operation import AzureOperationPoller
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, CreateOrUpdate, Delete = range(3)
-
-
-class AzureRMApplicationSecurityGroup(AzureRMModuleBase):
- """Configuration class for an Azure RM Application Security Group resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.location = None
- self.name = None
- self.tags = None
-
- self.state = None
-
- self.results = dict(changed=False)
-
- self.to_do = Actions.NoAction
-
- super(AzureRMApplicationSecurityGroup, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
-
- resource_group = self.get_resource_group(self.resource_group)
-
- if not self.location:
- self.location = resource_group.location
-
- old_response = self.get_applicationsecuritygroup()
-
- if not old_response:
- self.log("Application Security Group instance doesn't exist")
- if self.state == 'present':
- self.to_do = Actions.CreateOrUpdate
- else:
- self.log("Old instance didn't exist")
- else:
- self.log("Application Security Group instance already exists")
- if self.state == 'present':
- if self.check_update(old_response):
- self.to_do = Actions.CreateOrUpdate
-
- update_tags, self.tags = self.update_tags(old_response.get('tags', None))
- if update_tags:
- self.to_do = Actions.CreateOrUpdate
-
- elif self.state == 'absent':
- self.to_do = Actions.Delete
-
- if self.to_do == Actions.CreateOrUpdate:
- self.log("Need to Create / Update the Application Security Group instance")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- response = self.create_update_applicationsecuritygroup()
- self.results['id'] = response['id']
-
- elif self.to_do == Actions.Delete:
- self.log("Delete Application Security Group instance")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_applicationsecuritygroup()
-
- return self.results
-
-    def check_update(self, existing_asg):
-        if self.location and self.location.lower() != existing_asg['location'].lower():
-            self.module.warn("location cannot be updated. Existing {0}, input {1}".format(existing_asg['location'], self.location))
-        return False
-
- def create_update_applicationsecuritygroup(self):
- '''
- Create or update Application Security Group.
-
- :return: deserialized Application Security Group instance state dictionary
- '''
- self.log("Creating / Updating the Application Security Group instance {0}".format(self.name))
-
- param = dict(name=self.name,
- tags=self.tags,
- location=self.location)
- try:
- response = self.network_client.application_security_groups.create_or_update(resource_group_name=self.resource_group,
- application_security_group_name=self.name,
- parameters=param)
-            if isinstance(response, (LROPoller, AzureOperationPoller)):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error creating/updating Application Security Group instance.')
- self.fail("Error creating/updating Application Security Group instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_applicationsecuritygroup(self):
- '''
- Deletes specified Application Security Group instance.
-
- :return: True
- '''
- self.log("Deleting the Application Security Group instance {0}".format(self.name))
- try:
- response = self.network_client.application_security_groups.delete(resource_group_name=self.resource_group,
- application_security_group_name=self.name)
- except CloudError as e:
- self.log('Error deleting the Application Security Group instance.')
- self.fail("Error deleting the Application Security Group instance: {0}".format(str(e)))
-
- return True
-
- def get_applicationsecuritygroup(self):
- '''
- Gets the properties of the specified Application Security Group.
-
- :return: deserialized Application Security Group instance state dictionary
- '''
- self.log("Checking if the Application Security Group instance {0} is present".format(self.name))
- found = False
- try:
- response = self.network_client.application_security_groups.get(resource_group_name=self.resource_group,
- application_security_group_name=self.name)
- self.log("Response : {0}".format(response))
- self.log("Application Security Group instance : {0} found".format(response.name))
- return response.as_dict()
- except CloudError as e:
- self.log('Did not find the Application Security Group instance.')
- return False
-
-
-def main():
- """Main execution"""
- AzureRMApplicationSecurityGroup()
-
-
-if __name__ == '__main__':
- main()
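For readers tracing the flow above: before any API call, the module reduces the requested state, the existing resource and tag drift to one of the Actions values. A hedged, self-contained sketch of that decision (the constants mirror the Actions class; the tag check only approximates update_tags and ignores its merge options):

# Illustrative reduction of the present/absent decision; 'existing' stands for the
# dict returned by get_applicationsecuritygroup(), or None when nothing is found.
NO_ACTION, CREATE_OR_UPDATE, DELETE = range(3)


def decide(state, existing, desired_tags):
    if existing is None:
        return CREATE_OR_UPDATE if state == 'present' else NO_ACTION
    if state == 'absent':
        return DELETE
    # Location cannot change in place, so only tag drift forces an update here.
    current_tags = existing.get('tags') or {}
    tags_changed = any(current_tags.get(k) != v for k, v in (desired_tags or {}).items())
    return CREATE_OR_UPDATE if tags_changed else NO_ACTION


print(decide('present', None, {'env': 'dev'}))                      # 1 (create)
print(decide('present', {'tags': {'env': 'dev'}}, {'env': 'dev'}))  # 0 (no action)
print(decide('absent', {'tags': {}}, None))                         # 2 (delete)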
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_applicationsecuritygroup_info.py b/lib/ansible/modules/cloud/azure/azure_rm_applicationsecuritygroup_info.py
deleted file mode 100644
index dc99350101..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_applicationsecuritygroup_info.py
+++ /dev/null
@@ -1,229 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yunge Zhu, <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_applicationsecuritygroup_info
-version_added: "2.9"
-short_description: Get Azure Application Security Group facts
-description:
- - Get facts of Azure Application Security Group.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- name:
- description:
- - The name of the application security group.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yunge Zhu (@yungezz)
-
-'''
-
-EXAMPLES = '''
- - name: List application security groups in specific resource group
- azure_rm_applicationsecuritygroup_info:
- resource_group: myResourceGroup
-
- - name: List application security groups in specific subscription
- azure_rm_applicationsecuritygroup_info:
-
- - name: Get application security group by name
- azure_rm_applicationsecuritygroup_info:
- resource_group: myResourceGroup
- name: myApplicationSecurityGroup
- tags:
- - foo
-'''
-
-RETURN = '''
-applicationsecuritygroups:
- description:
- - List of application security groups.
- returned: always
- type: complex
- contains:
- id:
- description: Id of the application security group.
- type: str
- returned: always
- sample:
- "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/applicationSecurityGroups/MyAsg"
- location:
- description:
- - Location of the application security group.
- type: str
- returned: always
- sample: eastus
- name:
- description:
- - Name of the resource.
- type: str
- returned: always
- sample: myAsg
- provisioning_state:
- description:
- - Provisioning state of application security group.
- type: str
- returned: always
- sample: Succeeded
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrestazure.azure_operation import AzureOperationPoller
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def applicationsecuritygroup_to_dict(asg):
- return dict(
- id=asg.id,
- location=asg.location,
- name=asg.name,
- tags=asg.tags,
- provisioning_state=asg.provisioning_state
- )
-
-
-class AzureRMApplicationSecurityGroupInfo(AzureRMModuleBase):
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str'
- ),
- name=dict(
- type='str'
- ),
- tags=dict(type='list')
- )
-
- self.resource_group = None
- self.name = None
- self.tags = None
-
- self.results = dict(changed=False)
-
- super(AzureRMApplicationSecurityGroupInfo, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=False,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- is_old_facts = self.module._name == 'azure_rm_applicationsecuritygroup_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_applicationsecuritygroup_facts' module has been renamed to 'azure_rm_applicationsecuritygroup_info'",
- version='2.13')
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
-
- if self.name:
- if self.resource_group:
- self.results['applicationsecuritygroups'] = self.get()
- else:
- self.fail("resource_group is required when filtering by name")
- elif self.resource_group:
- self.results['applicationsecuritygroups'] = self.list_by_resource_group()
- else:
- self.results['applicationsecuritygroups'] = self.list_all()
-
- return self.results
-
- def get(self):
- '''
- Gets the properties of the specified Application Security Group.
-
- :return: deserialized Application Security Group instance state dictionary
- '''
- self.log("Get the Application Security Group instance {0}".format(self.name))
-
- results = []
- try:
- response = self.network_client.application_security_groups.get(resource_group_name=self.resource_group,
- application_security_group_name=self.name)
- self.log("Response : {0}".format(response))
-
- if response and self.has_tags(response.tags, self.tags):
- results.append(applicationsecuritygroup_to_dict(response))
- except CloudError as e:
-            self.log('Did not find the Application Security Group instance {0}.'.format(self.name))
- return results
-
- def list_by_resource_group(self):
- '''
- Lists the properties of Application Security Groups in specific resource group.
-
- :return: deserialized Application Security Group instance state dictionary
- '''
- self.log("Get the Application Security Groups in resource group {0}".format(self.resource_group))
-
- results = []
- try:
- response = list(self.network_client.application_security_groups.list(resource_group_name=self.resource_group))
- self.log("Response : {0}".format(response))
-
- if response:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(applicationsecuritygroup_to_dict(item))
- except CloudError as e:
- self.log('Did not find the Application Security Group instance.')
- return results
-
- def list_all(self):
- '''
- Lists the properties of Application Security Groups in specific subscription.
-
- :return: deserialized Application Security Group instance state dictionary
- '''
- self.log("Get the Application Security Groups in current subscription")
-
- results = []
- try:
- response = list(self.network_client.application_security_groups.list_all())
- self.log("Response : {0}".format(response))
-
- if response:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(applicationsecuritygroup_to_dict(item))
- except CloudError as e:
- self.log('Did not find the Application Security Group instance.')
- return results
-
-
-def main():
- """Main execution"""
- AzureRMApplicationSecurityGroupInfo()
-
-
-if __name__ == '__main__':
- main()
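The tags option documented above accepts entries of the form 'key' or 'key:value', and a group is returned only when every entry matches. has_tags itself lives in AzureRMModuleBase, so the following is an approximation of that rule rather than the library code:

def matches_tag_filter(resource_tags, tag_filters):
    # Approximate reimplementation of the 'key' / 'key:value' tag filter used above.
    if not tag_filters:
        return True            # no filter means every resource matches
    resource_tags = resource_tags or {}
    for entry in tag_filters:
        key, _, value = entry.partition(':')
        if key not in resource_tags:
            return False
        if value and resource_tags[key] != value:
            return False
    return True


print(matches_tag_filter({'env': 'prod', 'team': 'net'}, ['env']))   # True
print(matches_tag_filter({'env': 'prod'}, ['env:dev']))              # False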
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_appserviceplan.py b/lib/ansible/modules/cloud/azure/azure_rm_appserviceplan.py
deleted file mode 100644
index ee871c352b..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_appserviceplan.py
+++ /dev/null
@@ -1,379 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_appserviceplan
-version_added: "2.7"
-short_description: Manage App Service Plan
-description:
- - Create, update and delete instance of App Service Plan.
-
-options:
- resource_group:
- description:
- - Name of the resource group to which the resource belongs.
- required: True
-
- name:
- description:
- - Unique name of the app service plan to create or update.
- required: True
-
- location:
- description:
- - Resource location. If not set, location from the resource group will be used as default.
-
- sku:
- description:
- - The pricing tiers, e.g., C(F1), C(D1), C(B1), C(B2), C(B3), C(S1), C(P1), C(P1V2) etc.
- - Please see U(https://azure.microsoft.com/en-us/pricing/details/app-service/plans/) for more detail.
- - For Linux app service plan, please see U(https://azure.microsoft.com/en-us/pricing/details/app-service/linux/) for more detail.
- is_linux:
- description:
- - Describe whether to host webapp on Linux worker.
- type: bool
- default: false
-
- number_of_workers:
- description:
- - Describe number of workers to be allocated.
-
- state:
- description:
- - Assert the state of the app service plan.
- - Use C(present) to create or update an app service plan and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yunge Zhu (@yungezz)
-
-'''
-
-EXAMPLES = '''
- - name: Create a windows app service plan
- azure_rm_appserviceplan:
- resource_group: myResourceGroup
- name: myAppPlan
- location: eastus
- sku: S1
-
- - name: Create a linux app service plan
- azure_rm_appserviceplan:
- resource_group: myResourceGroup
- name: myAppPlan
- location: eastus
- sku: S1
- is_linux: true
- number_of_workers: 1
-
- - name: update sku of existing windows app service plan
- azure_rm_appserviceplan:
- resource_group: myResourceGroup
- name: myAppPlan
- location: eastus
- sku: S2
-'''
-
-RETURN = '''
-azure_appserviceplan:
- description: Facts about the current state of the app service plan.
- returned: always
- type: dict
- sample: {
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/myAppPlan"
- }
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrestazure.azure_operation import AzureOperationPoller
- from msrest.serialization import Model
- from azure.mgmt.web.models import (
- app_service_plan, AppServicePlan, SkuDescription
- )
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def _normalize_sku(sku):
- if sku is None:
- return sku
-
- sku = sku.upper()
- if sku == 'FREE':
- return 'F1'
- elif sku == 'SHARED':
- return 'D1'
- return sku
-
-
-def get_sku_name(tier):
- tier = tier.upper()
-    if tier in ['F1', 'FREE']:
-        return 'FREE'
-    elif tier in ['D1', 'SHARED']:
-        return 'SHARED'
- elif tier in ['B1', 'B2', 'B3', 'BASIC']:
- return 'BASIC'
- elif tier in ['S1', 'S2', 'S3']:
- return 'STANDARD'
- elif tier in ['P1', 'P2', 'P3']:
- return 'PREMIUM'
- elif tier in ['P1V2', 'P2V2', 'P3V2']:
- return 'PREMIUMV2'
- else:
- return None
-
-
-def appserviceplan_to_dict(plan):
- return dict(
- id=plan.id,
- name=plan.name,
- kind=plan.kind,
- location=plan.location,
- reserved=plan.reserved,
- is_linux=plan.reserved,
- provisioning_state=plan.provisioning_state,
- status=plan.status,
- target_worker_count=plan.target_worker_count,
- sku=dict(
- name=plan.sku.name,
- size=plan.sku.size,
- tier=plan.sku.tier,
- family=plan.sku.family,
- capacity=plan.sku.capacity
- ),
- resource_group=plan.resource_group,
- number_of_sites=plan.number_of_sites,
- tags=plan.tags if plan.tags else None
- )
-
-
-class AzureRMAppServicePlans(AzureRMModuleBase):
- """Configuration class for an Azure RM App Service Plan resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- sku=dict(
- type='str'
- ),
- is_linux=dict(
- type='bool',
- default=False
- ),
- number_of_workers=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.name = None
- self.location = None
-
- self.sku = None
- self.is_linux = None
- self.number_of_workers = 1
-
- self.tags = None
-
- self.results = dict(
- changed=False,
- ansible_facts=dict(azure_appserviceplan=None)
- )
- self.state = None
-
- super(AzureRMAppServicePlans, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if kwargs[key]:
- setattr(self, key, kwargs[key])
-
- old_response = None
- response = None
- to_be_updated = False
-
- # set location
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- # get app service plan
- old_response = self.get_plan()
-
- # if not existing
- if not old_response:
- self.log("App Service plan doesn't exist")
-
- if self.state == "present":
- to_be_updated = True
-
- if not self.sku:
-                    self.fail('Please specify sku when creating an app service plan')
-
- else:
- # existing app service plan, do update
- self.log("App Service Plan already exists")
-
- if self.state == 'present':
- self.log('Result: {0}'.format(old_response))
-
- update_tags, newtags = self.update_tags(old_response.get('tags', dict()))
-
- if update_tags:
- to_be_updated = True
- self.tags = newtags
-
- # check if sku changed
- if self.sku and _normalize_sku(self.sku) != old_response['sku']['size']:
- to_be_updated = True
-
- # check if number_of_workers changed
- if self.number_of_workers and int(self.number_of_workers) != old_response['sku']['capacity']:
- to_be_updated = True
-
- if self.is_linux and self.is_linux != old_response['reserved']:
-                    self.fail("Operation not allowed: cannot update the reserved (is_linux) property of an existing app service plan.")
-
- if old_response:
- self.results['id'] = old_response['id']
-
- if to_be_updated:
- self.log('Need to Create/Update app service plan')
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- response = self.create_or_update_plan()
- self.results['id'] = response['id']
-
- if self.state == 'absent' and old_response:
- self.log("Delete app service plan")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_plan()
-
- self.log('App service plan instance deleted')
-
- return self.results
-
- def get_plan(self):
- '''
- Gets app service plan
- :return: deserialized app service plan dictionary
- '''
- self.log("Get App Service Plan {0}".format(self.name))
-
- try:
- response = self.web_client.app_service_plans.get(self.resource_group, self.name)
- if response:
- self.log("Response : {0}".format(response))
- self.log("App Service Plan : {0} found".format(response.name))
-
- return appserviceplan_to_dict(response)
- except CloudError as ex:
- self.log("Didn't find app service plan {0} in resource group {1}".format(self.name, self.resource_group))
-
- return False
-
- def create_or_update_plan(self):
- '''
- Creates app service plan
- :return: deserialized app service plan dictionary
- '''
- self.log("Create App Service Plan {0}".format(self.name))
-
- try:
- # normalize sku
- sku = _normalize_sku(self.sku)
-
- sku_def = SkuDescription(tier=get_sku_name(
- sku), name=sku, capacity=self.number_of_workers)
- plan_def = AppServicePlan(
- location=self.location, app_service_plan_name=self.name, sku=sku_def, reserved=self.is_linux, tags=self.tags if self.tags else None)
-
- response = self.web_client.app_service_plans.create_or_update(self.resource_group, self.name, plan_def)
-
-            if isinstance(response, (LROPoller, AzureOperationPoller)):
- response = self.get_poller_result(response)
-
- self.log("Response : {0}".format(response))
-
- return appserviceplan_to_dict(response)
- except CloudError as ex:
- self.fail("Failed to create app service plan {0} in resource group {1}: {2}".format(self.name, self.resource_group, str(ex)))
-
- def delete_plan(self):
- '''
- Deletes specified App service plan in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the App service plan {0}".format(self.name))
- try:
- response = self.web_client.app_service_plans.delete(resource_group_name=self.resource_group,
- name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete App service plan.')
- self.fail(
- "Error deleting the App service plan : {0}".format(str(e)))
-
- return True
-
-
-def main():
- """Main execution"""
- AzureRMAppServicePlans()
-
-
-if __name__ == '__main__':
- main()
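As a quick reference for the SKU handling above: _normalize_sku maps the FREE and SHARED aliases to F1 and D1, and get_sku_name derives the pricing tier that goes into SkuDescription. A condensed, table-driven restatement of that mapping (illustrative only, not the module's code):

# 'free'/'shared' are aliases for F1/D1, and the tier is inferred from the size.
ALIASES = {'FREE': 'F1', 'SHARED': 'D1'}
TIERS = {'F1': 'FREE', 'D1': 'SHARED',
         'B1': 'BASIC', 'B2': 'BASIC', 'B3': 'BASIC',
         'S1': 'STANDARD', 'S2': 'STANDARD', 'S3': 'STANDARD',
         'P1': 'PREMIUM', 'P2': 'PREMIUM', 'P3': 'PREMIUM',
         'P1V2': 'PREMIUMV2', 'P2V2': 'PREMIUMV2', 'P3V2': 'PREMIUMV2'}


def sku_for(user_input):
    size = ALIASES.get(user_input.upper(), user_input.upper())
    return size, TIERS.get(size)


print(sku_for('free'))   # ('F1', 'FREE')
print(sku_for('S1'))     # ('S1', 'STANDARD')
print(sku_for('P1v2'))   # ('P1V2', 'PREMIUMV2')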
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_appserviceplan_info.py b/lib/ansible/modules/cloud/azure/azure_rm_appserviceplan_info.py
deleted file mode 100644
index 3309dc1773..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_appserviceplan_info.py
+++ /dev/null
@@ -1,241 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_appserviceplan_info
-
-version_added: "2.9"
-
-short_description: Get azure app service plan facts
-
-description:
-    - Get facts for a specific app service plan, all app service plans in a resource group, or all app service plans in the current subscription.
-
-options:
- name:
- description:
- - Only show results for a specific app service plan.
- resource_group:
- description:
- - Limit results by resource group.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yunge Zhu (@yungezz)
-'''
-
-EXAMPLES = '''
- - name: Get facts for app service plan by name
- azure_rm_appserviceplan_info:
- resource_group: myResourceGroup
- name: myAppServicePlan
-
-  - name: Get facts for all app service plans in a resource group
- azure_rm_appserviceplan_info:
- resource_group: myResourceGroup
-
- - name: Get facts for app service plan with tags
- azure_rm_appserviceplan_info:
- tags:
- - testtag
- - foo:bar
-'''
-
-RETURN = '''
-appserviceplans:
- description: List of app service plans.
- returned: always
- type: complex
- contains:
- id:
- description: Id of the app service plan.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/myPlan
- name:
- description: Name of the app service plan.
- returned: always
- type: str
- resource_group:
- description: Resource group of the app service plan.
- returned: always
- type: str
- sample: myResourceGroup
- location:
- description: Location of the app service plan.
- returned: always
- type: str
- kind:
- description: Kind of the app service plan.
- returned: always
- type: str
- sample: app
- sku:
- description: Sku of the app service plan.
- returned: always
- type: complex
- contains:
- name:
- description: Name of sku.
- returned: always
- type: str
- sample: S1
- family:
- description: Family of sku.
- returned: always
- type: str
- sample: S
- size:
- description: Size of sku.
- returned: always
- type: str
- sample: S1
- tier:
- description: Tier of sku.
- returned: always
- type: str
- sample: Standard
- capacity:
- description: Capacity of sku.
- returned: always
- type: int
- sample: 1
-'''
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.common import AzureMissingResourceHttpError, AzureHttpError
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-AZURE_OBJECT_CLASS = 'AppServicePlan'
-
-
-class AzureRMAppServicePlanInfo(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str'),
- resource_group=dict(type='str'),
- tags=dict(type='list')
- )
-
- self.results = dict(changed=False)
-
- self.name = None
- self.resource_group = None
- self.tags = None
- self.info_level = None
-
- super(AzureRMAppServicePlanInfo, self).__init__(self.module_arg_spec,
- supports_tags=False,
- facts_module=True)
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_appserviceplan_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_appserviceplan_facts' module has been renamed to 'azure_rm_appserviceplan_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name:
- self.results['appserviceplans'] = self.list_by_name()
- elif self.resource_group:
- self.results['appserviceplans'] = self.list_by_resource_group()
- else:
- self.results['appserviceplans'] = self.list_all()
-
- return self.results
-
- def list_by_name(self):
- self.log('Get app service plan {0}'.format(self.name))
- item = None
- result = []
-
- try:
- item = self.web_client.app_service_plans.get(self.resource_group, self.name)
- except CloudError:
- pass
-
- if item and self.has_tags(item.tags, self.tags):
- curated_result = self.construct_curated_plan(item)
- result = [curated_result]
-
- return result
-
- def list_by_resource_group(self):
-        self.log('List app service plans in resource group {0}'.format(self.resource_group))
- try:
- response = list(self.web_client.app_service_plans.list_by_resource_group(self.resource_group))
- except CloudError as exc:
-            self.fail("Error listing app service plans in resource group {0} - {1}".format(self.resource_group, str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- curated_output = self.construct_curated_plan(item)
- results.append(curated_output)
- return results
-
- def list_all(self):
- self.log('List app service plans in current subscription')
- try:
- response = list(self.web_client.app_service_plans.list())
- except CloudError as exc:
- self.fail("Error listing app service plans: {0}".format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- curated_output = self.construct_curated_plan(item)
- results.append(curated_output)
- return results
-
- def construct_curated_plan(self, plan):
- plan_facts = self.serialize_obj(plan, AZURE_OBJECT_CLASS)
-
- curated_output = dict()
- curated_output['id'] = plan_facts['id']
- curated_output['name'] = plan_facts['name']
- curated_output['resource_group'] = plan_facts['properties']['resourceGroup']
- curated_output['location'] = plan_facts['location']
- curated_output['tags'] = plan_facts.get('tags', None)
- curated_output['is_linux'] = False
- curated_output['kind'] = plan_facts['kind']
- curated_output['sku'] = plan_facts['sku']
-
- if plan_facts['properties'].get('reserved', None):
- curated_output['is_linux'] = True
-
- return curated_output
-
-
-def main():
- AzureRMAppServicePlanInfo()
-
-
-if __name__ == '__main__':
- main()
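construct_curated_plan above trims the serialized AppServicePlan down to a stable fact shape and surfaces Azure's reserved flag as is_linux. A small illustration with a hand-built facts dict (the input shape is assumed from the code, and the values are hypothetical):

# Hypothetical serialized plan, shaped like the serialize_obj() output consumed above.
plan_facts = {
    'id': '/subscriptions/xxx/resourceGroups/rg/providers/Microsoft.Web/serverfarms/myPlan',
    'name': 'myPlan',
    'location': 'eastus',
    'kind': 'linux',
    'sku': {'name': 'S1', 'tier': 'Standard', 'size': 'S1', 'family': 'S', 'capacity': 1},
    'properties': {'resourceGroup': 'rg', 'reserved': True},
}

curated = {
    'id': plan_facts['id'],
    'name': plan_facts['name'],
    'resource_group': plan_facts['properties']['resourceGroup'],
    'location': plan_facts['location'],
    'kind': plan_facts['kind'],
    'sku': plan_facts['sku'],
    # Azure models Linux plans as reserved=True; the module surfaces that as is_linux.
    'is_linux': bool(plan_facts['properties'].get('reserved')),
}
print(curated['is_linux'])  # True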
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_automationaccount.py b/lib/ansible/modules/cloud/azure/azure_rm_automationaccount.py
deleted file mode 100644
index 42628ea60e..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_automationaccount.py
+++ /dev/null
@@ -1,174 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_automationaccount
-version_added: "2.9"
-short_description: Manage Azure Automation account
-description:
- - Create, delete an Azure Automation account.
-options:
- resource_group:
- description:
- - Name of resource group.
- type: str
- required: true
- name:
- description:
- - Name of the automation account.
- type: str
- required: true
- state:
- description:
-            - State of the automation account. Use C(present) to create or update an automation account and C(absent) to delete an automation account.
- type: str
- default: present
- choices:
- - absent
- - present
- location:
- description:
- - Location of the resource.
- - If not specified, use resource group location.
- type: str
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
-- name: Create an automation account
- azure_rm_automationaccount:
- name: Testing
- resource_group: myResourceGroup
-
-- name: Create an automation account
- azure_rm_automationaccount:
- name: Testing
- resource_group: myResourceGroup
- location: eastus
-'''
-
-RETURN = '''
-id:
- description:
- - Automation account resource ID.
- type: str
- returned: success
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Automation/automationAccounts/Testing"
-''' # NOQA
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-
-class AzureRMAutomationAccount(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- location=dict(type='str')
- )
-
- self.results = dict(
- changed=False,
- id=None
- )
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.location = None
-
- super(AzureRMAutomationAccount, self).__init__(self.module_arg_spec, supports_check_mode=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- account = self.get_account()
- changed = False
- if self.state == 'present':
- if not account:
- if not self.location:
- resource_group = self.get_resource_group(self.resource_group)
- self.location = resource_group.location
- param = self.automation_models.AutomationAccountCreateOrUpdateParameters(
- location=self.location,
- sku=self.automation_models.Sku(name='Basic'),
- tags=self.tags
- )
- changed = True
- if not self.check_mode:
- account = self.create_or_update(param)
- elif self.tags:
- update_tags, tags = self.update_tags(account.tags)
-                if update_tags:
-                    changed = True
-                    param = self.automation_models.AutomationAccountUpdateParameters(
-                        tags=tags
-                    )
-                    if not self.check_mode:
-                        self.update_account_tags(param)
- if account:
- self.results['id'] = account.id
- elif account:
- changed = True
- if not self.check_mode:
- self.delete_account()
- self.results['changed'] = changed
- return self.results
-
- def get_account(self):
- try:
- return self.automation_client.automation_account.get(self.resource_group, self.name)
- except self.automation_models.ErrorResponseException:
- pass
-
- def create_or_update(self, param):
- try:
- return self.automation_client.automation_account.create_or_update(self.resource_group, self.name, param)
- except self.automation_models.ErrorResponseException as exc:
- self.fail('Error when creating automation account {0}: {1}'.format(self.name, exc.message))
-
- def update_account_tags(self, param):
- try:
- return self.automation_client.automation_account.update(self.resource_group, self.name, param)
- except self.automation_models.ErrorResponseException as exc:
- self.fail('Error when updating automation account {0}: {1}'.format(self.name, exc.message))
-
- def delete_account(self):
- try:
- return self.automation_client.automation_account.delete(self.resource_group, self.name)
- except self.automation_models.ErrorResponseException as exc:
- self.fail('Error when deleting automation account {0}: {1}'.format(self.name, exc.message))
-
-
-def main():
- AzureRMAutomationAccount()
-
-
-if __name__ == '__main__':
- main()
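The only in-place change this module makes is a tag merge: update_tags() overlays the requested tags on the account's current tags and reports whether anything changed. A rough approximation of that behaviour (the real helper lives in AzureRMModuleBase and also handles the module's tag-merge options, which are omitted here):

def merge_tags(existing, requested):
    # Overlay requested tags on the existing ones and report whether anything changed.
    existing = dict(existing or {})
    merged = dict(existing)
    merged.update(requested or {})
    return merged != existing, merged


changed, tags = merge_tags({'owner': 'ops'}, {'env': 'test'})
print(changed, tags)   # True {'owner': 'ops', 'env': 'test'}
changed, tags = merge_tags({'env': 'test'}, {'env': 'test'})
print(changed, tags)   # False {'env': 'test'}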
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_automationaccount_info.py b/lib/ansible/modules/cloud/azure/azure_rm_automationaccount_info.py
deleted file mode 100644
index 0cf854f56e..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_automationaccount_info.py
+++ /dev/null
@@ -1,383 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_automationaccount_info
-version_added: '2.9'
-short_description: Get Azure automation account facts
-description:
- - Get facts of automation account.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- type: str
- required: True
- name:
- description:
- - The name of the automation account.
- type: str
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- type: list
-    list_statistics:
-        description:
-            - List statistics details for an automation account.
-            - Note this incurs extra network overhead; recommended only when I(name) is set.
-        type: bool
-    list_usages:
-        description:
-            - List usage details for an automation account.
-            - Note this incurs extra network overhead; recommended only when I(name) is set.
-        type: bool
-    list_keys:
-        description:
-            - List keys for an automation account.
-            - Note this incurs extra network overhead; recommended only when I(name) is set.
-        type: bool
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
-- name: Get details of an automation account
- azure_rm_automationaccount_info:
- name: Testing
- resource_group: myResourceGroup
- list_statistics: yes
- list_usages: yes
- list_keys: yes
-
-- name: List automation account in a resource group
- azure_rm_automationaccount_info:
- resource_group: myResourceGroup
-
-- name: List automation account in a resource group
- azure_rm_automationaccount_info:
-'''
-
-RETURN = '''
-automation_accounts:
- description:
- - List of automation account dicts.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- type: str
- returned: always
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups
- /myResourceGroup/providers/Microsoft.Automation/automationAccounts/Testing"
- resource_group:
- description:
- - Resource group name.
- type: str
- returned: always
- sample: myResourceGroup
- name:
- description:
- - Resource name.
- type: str
- returned: always
- sample: Testing
- location:
- description:
- - Resource location.
- type: str
- returned: always
- sample: eastus
- creation_time:
- description:
- - Resource creation date time.
- type: str
- returned: always
- sample: "2019-04-26T02:55:16.500Z"
- last_modified_time:
- description:
- - Resource last modified date time.
- type: str
- returned: always
- sample: "2019-04-26T02:55:16.500Z"
- state:
- description:
- - Resource state.
- type: str
- returned: always
- sample: ok
- keys:
- description:
- - Resource keys.
- type: complex
- returned: always
- contains:
- key_name:
- description:
- - Name of the key.
- type: str
- returned: always
- sample: Primary
- permissions:
- description:
- - Permission of the key.
- type: str
- returned: always
- sample: Full
- value:
- description:
- - Value of the key.
- type: str
- returned: always
- sample: "MbepKTO6IyGwml0GaKBkKN"
- statistics:
- description:
- - Resource statistics.
- type: complex
- returned: always
- contains:
- counter_property:
- description:
- - Property value of the statistic.
- type: str
- returned: always
- sample: New
- counter_value:
- description:
- - Value of the statistic.
- type: int
- returned: always
- sample: 0
- end_time:
- description:
- - EndTime of the statistic.
- type: str
- returned: always
- sample: "2019-04-26T06:29:43.587518Z"
- id:
- description:
- - ID of the statistic.
- type: str
- returned: always
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups
- /myResourceGroup/providers/Microsoft.Automation/automationAccounts/Testing/statistics/New"
- start_time:
- description:
- - StartTime of the statistic.
- type: str
- returned: always
- sample: "2019-04-26T06:29:43.587518Z"
- usages:
- description:
- - Resource usages.
- type: complex
- returned: always
- contains:
- current_value:
- description:
- - Current usage.
- type: float
- returned: always
- sample: 0.0
- limit:
- description:
- - Max limit, C(-1) for unlimited.
- type: int
- returned: always
- sample: -1
- name:
- description:
- - Usage counter name.
- type: complex
- returned: always
- contains:
- localized_value:
- description:
- - Localized name.
- type: str
- returned: always
- sample: "SubscriptionUsage"
- value:
- description:
- - Name value.
- type: str
- returned: always
- sample: "SubscriptionUsage"
- unit:
- description:
- - Usage unit name.
- type: str
- returned: always
- sample: "Minute"
- throttle_status:
- description:
- - Usage throttle status.
- type: str
- returned: always
- sample: "NotThrottled"
-
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.tools import parse_resource_id
-except ImportError:
- pass
-
-
-class AzureRMAutomationAccountInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- ),
- list_statistics=dict(
- type='bool'
- ),
- list_usages=dict(
- type='bool'
- ),
- list_keys=dict(
- type='bool'
- )
- )
- # store the results of the module operation
- self.results = dict()
- self.resource_group = None
- self.name = None
- self.tags = None
- self.list_statistics = None
- self.list_usages = None
- self.list_keys = None
-
- super(AzureRMAutomationAccountInfo, self).__init__(self.module_arg_spec, supports_tags=False, facts_module=True)
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_automationaccount_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_automationaccount_facts' module has been renamed to 'azure_rm_automationaccount_info'", version='2.13')
-
- for key in list(self.module_arg_spec):
- setattr(self, key, kwargs[key])
-
- if self.resource_group and self.name:
- accounts = [self.get()]
- elif self.resource_group:
- accounts = self.list_by_resource_group()
- else:
- accounts = self.list_all()
- self.results['automation_accounts'] = [self.to_dict(x) for x in accounts if self.has_tags(x.tags, self.tags)]
- return self.results
-
- def to_dict(self, account):
- if not account:
- return None
- id_dict = parse_resource_id(account.id)
- result = account.as_dict()
- result['resource_group'] = id_dict['resource_group']
- if self.list_statistics:
- result['statistics'] = self.get_statics(id_dict['resource_group'], account.name)
- if self.list_usages:
- result['usages'] = self.get_usages(id_dict['resource_group'], account.name)
- if self.list_keys:
- result['keys'] = self.list_account_keys(id_dict['resource_group'], account.name)
- return result
-
- def get(self):
- try:
- return self.automation_client.automation_account.get(self.resource_group, self.name)
- except self.automation_models.ErrorResponseException as exc:
- self.fail('Error when getting automation account {0}: {1}'.format(self.name, exc.message))
-
- def list_by_resource_group(self):
- result = []
- try:
- resp = self.automation_client.automation_account.list_by_resource_group(self.resource_group)
- while True:
- result.append(resp.next())
- except StopIteration:
- pass
- except self.automation_models.ErrorResponseException as exc:
- self.fail('Error when listing automation account in resource group {0}: {1}'.format(self.resource_group, exc.message))
- return result
-
- def list_all(self):
- result = []
- try:
- resp = self.automation_client.automation_account.list()
- while True:
- result.append(resp.next())
- except StopIteration:
- pass
- except self.automation_models.ErrorResponseException as exc:
- self.fail('Error when listing automation account: {0}'.format(exc.message))
- return result
-
- def get_statics(self, resource_group, name):
- result = []
- try:
- resp = self.automation_client.statistics.list_by_automation_account(resource_group, name)
- while True:
- result.append(resp.next().as_dict())
- except StopIteration:
- pass
- except self.automation_models.ErrorResponseException as exc:
-            self.fail('Error when getting statistics for automation account {0}/{1}: {2}'.format(resource_group, name, exc.message))
- return result
-
- def get_usages(self, resource_group, name):
- result = []
- try:
- resp = self.automation_client.usages.list_by_automation_account(resource_group, name)
- while True:
- result.append(resp.next().as_dict())
- except StopIteration:
- pass
- except self.automation_models.ErrorResponseException as exc:
- self.fail('Error when getting usage for automation account {0}/{1}: {2}'.format(resource_group, name, exc.message))
- return result
-
- def list_account_keys(self, resource_group, name):
- try:
- resp = self.automation_client.keys.list_by_automation_account(resource_group, name)
- return [x.as_dict() for x in resp.keys]
- except self.automation_models.ErrorResponseException as exc:
- self.fail('Error when listing keys for automation account {0}/{1}: {2}'.format(resource_group, name, exc.message))
-
-
-def main():
- AzureRMAutomationAccountInfo()
-
-
-if __name__ == '__main__':
- main()
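All of the list_* helpers above drain the SDK's paged responses the same way: keep calling next until StopIteration. For reference, the same pattern written against any Python iterator (next(it) is the builtin spelling; the paged responses behave as iterators from this code's point of view):

def drain(iterator, transform=lambda item: item):
    # Generic version of the drain loop used by list_by_resource_group/list_all above.
    collected = []
    while True:
        try:
            collected.append(transform(next(iterator)))
        except StopIteration:
            break
    return collected


print(drain(iter([1, 2, 3])))              # [1, 2, 3]
print(drain(iter(['a', 'b']), str.upper))  # ['A', 'B']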
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_autoscale.py b/lib/ansible/modules/cloud/azure/azure_rm_autoscale.py
deleted file mode 100644
index f9a59bd6d0..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_autoscale.py
+++ /dev/null
@@ -1,649 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_autoscale
-version_added: "2.7"
-short_description: Manage Azure autoscale setting
-description:
- - Create, delete an autoscale setting.
-options:
- target:
- description:
- - The identifier of the resource to apply autoscale setting.
- - It could be the resource id string.
-            - It can also be a dict that contains the C(name), C(subscription_id), C(namespace), C(types), C(resource_group) of the resource.
- resource_group:
- required: true
- description:
- - Resource group of the resource.
- enabled:
- type: bool
- description:
- - Specifies whether automatic scaling is enabled for the resource.
- default: true
- profiles:
- description:
- - The collection of automatic scaling profiles that specify different scaling parameters for different time periods.
- - A maximum of 20 profiles can be specified.
- suboptions:
- name:
- required: true
- description:
- - The name of the profile.
- count:
- required: true
- description:
- - The number of instances that will be set if metrics are not available for evaluation.
- - The default is only used if the current instance count is lower than the default.
- min_count:
- description:
- - The minimum number of instances for the resource.
- max_count:
- description:
- - The maximum number of instances for the resource.
- - The actual maximum number of instances is limited by the cores that are available in the subscription.
- recurrence_frequency:
- default: None
- description:
- - How often the schedule profile should take effect.
-                    - If this value is C(Week), each week will have the same set of profiles.
- - This element is not used if the FixedDate element is used.
- choices:
- - None
- - Second
- - Minute
- - Hour
- - Day
- - Week
- - Month
- - Year
- recurrence_timezone:
- description:
- - The timezone of repeating times at which this profile begins.
- - This element is not used if the FixedDate element is used.
- recurrence_days:
- description:
- - The days of repeating times at which this profile begins.
- - This element is not used if the FixedDate element is used.
- recurrence_hours:
- description:
- - The hours of repeating times at which this profile begins.
- - This element is not used if the FixedDate element is used.
- recurrence_mins:
- description:
- The minutes of repeating times at which this profile begins.
- - This element is not used if the FixedDate element is used.
- fixed_date_timezone:
- description:
- - The specific date-time timezone for the profile.
- - This element is not used if the Recurrence element is used.
- fixed_date_start:
- description:
- - The specific date-time start for the profile.
- - This element is not used if the Recurrence element is used.
- fixed_date_end:
- description:
- - The specific date-time end for the profile.
- - This element is not used if the Recurrence element is used.
- rules:
- description:
- - The collection of rules that provide the triggers and parameters for the scaling action.
- - A maximum of 10 rules can be specified.
- suboptions:
- time_aggregation:
- default: Average
- description:
- - How the data that is collected should be combined over time.
- choices:
- - Average
- - Minimum
- - Maximum
- - Total
- - Count
- time_window:
- required: true
- description:
- The range of time (in minutes) in which instance data is collected.
- This value must be greater than the delay in metric collection, which can vary from resource to resource.
- Must be between 5 and 720.
- direction:
- description:
- - Whether the scaling action increases or decreases the number of instances.
- choices:
- - Increase
- - Decrease
- metric_name:
- required: true
- description:
- - The name of the metric that defines what the rule monitors.
- metric_resource_uri:
- description:
- - The resource identifier of the resource the rule monitors.
- value:
- description:
- - The number of instances that are involved in the scaling action.
- - This value must be 1 or greater.
- operator:
- default: GreaterThan
- description:
- - The operator that is used to compare the metric data and the threshold.
- choices:
- - Equals
- - NotEquals
- - GreaterThan
- - GreaterThanOrEqual
- - LessThan
- - LessThanOrEqual
- cooldown:
- description:
- - The amount of time (minutes) to wait since the last scaling action before this action occurs.
- It must be between 1 and 10080.
- time_grain:
- required: true
- description:
- The granularity (in minutes) of the metrics the rule monitors.
- Must be one of the predefined values returned from metric definitions for the metric.
- Must be between 1 and 720.
- statistic:
- default: Average
- description:
- - How the metrics from multiple instances are combined.
- choices:
- - Average
- - Min
- - Max
- - Sum
- threshold:
- default: 70
- description:
- - The threshold of the metric that triggers the scale action.
- type:
- description:
- - The type of action that should occur when the scale rule fires.
- choices:
- - PercentChangeCount
- - ExactCount
- - ChangeCount
- notifications:
- description:
- - The collection of notifications.
- suboptions:
- custom_emails:
- description:
- - The custom e-mails list. This value can be null or empty, in which case this attribute will be ignored.
- send_to_subscription_administrator:
- type: bool
- description:
- - A value indicating whether to send email to subscription administrator.
- webhooks:
- description:
- The list of webhook notification service URIs.
- send_to_subscription_co_administrators:
- type: bool
- description:
- - A value indicating whether to send email to subscription co-administrators.
- state:
- default: present
- description:
- Assert the state of the autoscale setting. Use C(present) to create or update and C(absent) to delete.
- choices:
- - present
- - absent
- location:
- description:
- Location of the resource.
- name:
- required: true
- description:
- Name of the resource.
-
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
-- name: Create an auto scale
- azure_rm_autoscale:
- target: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScaleSets/myVmss"
- enabled: true
- profiles:
- - count: '1'
- recurrence_days:
- - Monday
- name: Auto created scale condition
- recurrence_timezone: China Standard Time
- recurrence_mins:
- - '0'
- min_count: '1'
- max_count: '1'
- recurrence_frequency: Week
- recurrence_hours:
- - '18'
- name: scale
- resource_group: myResourceGroup
-
-- name: Create an auto scale with complicated profile
- azure_rm_autoscale:
- target: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScaleSets
- /myVmss"
- enabled: true
- profiles:
- - count: '1'
- recurrence_days:
- - Monday
- name: Auto created scale condition 0
- rules:
- time_aggregation: Average
- time_window: 10
- direction: Increase
- metric_name: Percentage CPU
- metric_resource_uri: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtua
- lMachineScaleSets/vmss"
- value: '1'
- threshold: 70
- cooldown: 5
- time_grain: 1
- statistic: Average
- operator: GreaterThan
- type: ChangeCount
- max_count: '1'
- recurrence_mins:
- - '0'
- min_count: '1'
- recurrence_timezone: China Standard Time
- recurrence_frequency: Week
- recurrence_hours:
- - '6'
- notifications:
- - email_admin: True
- email_co_admin: False
- custom_emails:
- - yuwzho@microsoft.com
- name: scale
- resource_group: myResourceGroup
-
-- name: Delete an Azure Auto Scale Setting
- azure_rm_autoscale:
- state: absent
- resource_group: myResourceGroup
- name: scale
-'''
-
-RETURN = '''
-state:
- description: Current state of the resource.
- returned: always
- type: dict
- sample: {
- "changed": false,
- "enabled": true,
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/microsoft.insights/autoscalesettings/scale",
- "location": "eastus",
- "name": "scale",
- "notifications": [
- {
- "custom_emails": [
- "yuwzho@microsoft.com"
- ],
- "send_to_subscription_administrator": true,
- "send_to_subscription_co_administrators": false,
- "webhooks": []
- }
- ],
- "profiles": [
- {
- "count": "1",
- "max_count": "1",
- "min_count": "1",
- "name": "Auto created scale condition 0",
- "recurrence_days": [
- "Monday"
- ],
- "recurrence_frequency": "Week",
- "recurrence_hours": [
- "6"
- ],
- "recurrence_mins": [
- "0"
- ],
- "recurrence_timezone": "China Standard Time",
- "rules": [
- {
- "cooldown": 5.0,
- "direction": "Increase",
- "metric_name": "Percentage CPU",
- "metric_resource_uri": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsof
- t.Compute/virtualMachineScaleSets/MyVmss",
- "operator": "GreaterThan",
- "statistic": "Average",
- "threshold": 70.0,
- "time_aggregation": "Average",
- "time_grain": 1.0,
- "time_window": 10.0,
- "type": "ChangeCount",
- "value": "1"
- }
- ]
- }
- ],
- "target": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScale
- Sets/myVmss"
- }
-''' # NOQA
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
-from ansible.module_utils._text import to_native
-from datetime import timedelta
-
-try:
- from msrestazure.tools import parse_resource_id
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.monitor.models import WebhookNotification, EmailNotification, AutoscaleNotification, RecurrentSchedule, MetricTrigger, \
- ScaleAction, AutoscaleSettingResource, AutoscaleProfile, ScaleCapacity, TimeWindow, Recurrence, ScaleRule
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-# duplicated in azure_rm_autoscale_info
-def timedelta_to_minutes(time):
- if not time:
- return 0
- return time.days * 1440 + time.seconds / 60.0 + time.microseconds / 60000000.0
-
-
-def get_enum_value(item):
- if 'value' in dir(item):
- return to_native(item.value)
- return to_native(item)
-
-
-def auto_scale_to_dict(instance):
- if not instance:
- return dict()
- return dict(
- id=to_native(instance.id or ''),
- name=to_native(instance.name),
- location=to_native(instance.location),
- profiles=[profile_to_dict(p) for p in instance.profiles or []],
- notifications=[notification_to_dict(n) for n in instance.notifications or []],
- enabled=instance.enabled,
- target=to_native(instance.target_resource_uri),
- tags=instance.tags
- )
-
-
-def rule_to_dict(rule):
- if not rule:
- return dict()
- result = dict(metric_name=to_native(rule.metric_trigger.metric_name),
- metric_resource_uri=to_native(rule.metric_trigger.metric_resource_uri),
- time_grain=timedelta_to_minutes(rule.metric_trigger.time_grain),
- statistic=get_enum_value(rule.metric_trigger.statistic),
- time_window=timedelta_to_minutes(rule.metric_trigger.time_window),
- time_aggregation=get_enum_value(rule.metric_trigger.time_aggregation),
- operator=get_enum_value(rule.metric_trigger.operator),
- threshold=float(rule.metric_trigger.threshold))
- if rule.scale_action and to_native(rule.scale_action.direction) != 'None':
- result['direction'] = get_enum_value(rule.scale_action.direction)
- result['type'] = get_enum_value(rule.scale_action.type)
- result['value'] = to_native(rule.scale_action.value)
- result['cooldown'] = timedelta_to_minutes(rule.scale_action.cooldown)
- return result
-
-
-def profile_to_dict(profile):
- if not profile:
- return dict()
- result = dict(name=to_native(profile.name),
- count=to_native(profile.capacity.default),
- max_count=to_native(profile.capacity.maximum),
- min_count=to_native(profile.capacity.minimum))
-
- if profile.rules:
- result['rules'] = [rule_to_dict(r) for r in profile.rules]
- if profile.fixed_date:
- result['fixed_date_timezone'] = profile.fixed_date.time_zone
- result['fixed_date_start'] = profile.fixed_date.start
- result['fixed_date_end'] = profile.fixed_date.end
- if profile.recurrence:
- if get_enum_value(profile.recurrence.frequency) != 'None':
- result['recurrence_frequency'] = get_enum_value(profile.recurrence.frequency)
- if profile.recurrence.schedule:
- result['recurrence_timezone'] = to_native(str(profile.recurrence.schedule.time_zone))
- result['recurrence_days'] = [to_native(r) for r in profile.recurrence.schedule.days]
- result['recurrence_hours'] = [to_native(r) for r in profile.recurrence.schedule.hours]
- result['recurrence_mins'] = [to_native(r) for r in profile.recurrence.schedule.minutes]
- return result
-
-
-def notification_to_dict(notification):
- if not notification:
- return dict()
- return dict(send_to_subscription_administrator=notification.email.send_to_subscription_administrator if notification.email else False,
- send_to_subscription_co_administrators=notification.email.send_to_subscription_co_administrators if notification.email else False,
- custom_emails=[to_native(e) for e in notification.email.custom_emails or []],
- webhooks=[to_native(w.service_url) for w in notification.webhooks or []])
-
-
-rule_spec = dict(
- metric_name=dict(type='str', required=True),
- metric_resource_uri=dict(type='str'),
- time_grain=dict(type='float', required=True),
- statistic=dict(type='str', choices=['Average', 'Min', 'Max', 'Sum'], default='Average'),
- time_window=dict(type='float', required=True),
- time_aggregation=dict(type='str', choices=['Average', 'Minimum', 'Maximum', 'Total', 'Count'], default='Average'),
- operator=dict(type='str',
- choices=['Equals', 'NotEquals', 'GreaterThan', 'GreaterThanOrEqual', 'LessThan', 'LessThanOrEqual'],
- default='GreaterThan'),
- threshold=dict(type='float', default=70),
- direction=dict(type='str', choices=['Increase', 'Decrease']),
- type=dict(type='str', choices=['PercentChangeCount', 'ExactCount', 'ChangeCount']),
- value=dict(type='str'),
- cooldown=dict(type='float')
-)
-
-
-profile_spec = dict(
- name=dict(type='str', required=True),
- count=dict(type='str', required=True),
- max_count=dict(type='str'),
- min_count=dict(type='str'),
- rules=dict(type='list', elements='dict', options=rule_spec),
- fixed_date_timezone=dict(type='str'),
- fixed_date_start=dict(type='str'),
- fixed_date_end=dict(type='str'),
- recurrence_frequency=dict(type='str', choices=['None', 'Second', 'Minute', 'Hour', 'Day', 'Week', 'Month', 'Year'], default='None'),
- recurrence_timezone=dict(type='str'),
- recurrence_days=dict(type='list', elements='str'),
- recurrence_hours=dict(type='list', elements='str'),
- recurrence_mins=dict(type='list', elements='str')
-)
-
-
-notification_spec = dict(
- send_to_subscription_administrator=dict(type='bool', aliases=['email_admin'], default=False),
- send_to_subscription_co_administrators=dict(type='bool', aliases=['email_co_admin'], default=False),
- custom_emails=dict(type='list', elements='str'),
- webhooks=dict(type='list', elements='str')
-)
-
-
-class AzureRMAutoScale(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- location=dict(type='str'),
- target=dict(type='raw'),
- profiles=dict(type='list', elements='dict', options=profile_spec),
- enabled=dict(type='bool', default=True),
- notifications=dict(type='list', elements='dict', options=notification_spec)
- )
-
- self.results = dict(
- changed=False
- )
-
- required_if = [
- ('state', 'present', ['target', 'profiles'])
- ]
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.location = None
- self.tags = None
- self.target = None
- self.profiles = None
- self.notifications = None
- self.enabled = None
-
- super(AzureRMAutoScale, self).__init__(self.module_arg_spec, supports_check_mode=True, required_if=required_if)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- results = None
- changed = False
-
- self.log('Fetching auto scale settings {0}'.format(self.name))
- results = self.get_auto_scale()
- if results and self.state == 'absent':
- # delete
- changed = True
- if not self.check_mode:
- self.delete_auto_scale()
- elif self.state == 'present':
-
- if not self.location:
- # Set default location
- resource_group = self.get_resource_group(self.resource_group)
- self.location = resource_group.location
-
- resource_id = self.target
- if isinstance(self.target, dict):
- resource_id = format_resource_id(val=self.target['name'],
- subscription_id=self.target.get('subscription_id') or self.subscription_id,
- namespace=self.target['namespace'],
- types=self.target['types'],
- resource_group=self.target.get('resource_group') or self.resource_group)
- self.target = resource_id
- resource_name = self.name
-
- def create_rule_instance(params):
- rule = params.copy()
- rule['metric_resource_uri'] = rule.get('metric_resource_uri', self.target)
- rule['time_grain'] = timedelta(minutes=rule.get('time_grain', 0))
- rule['time_window'] = timedelta(minutes=rule.get('time_window', 0))
- rule['cooldown'] = timedelta(minutes=rule.get('cooldown', 0))
- return ScaleRule(metric_trigger=MetricTrigger(**rule), scale_action=ScaleAction(**rule))
-
- profiles = [AutoscaleProfile(name=p.get('name'),
- capacity=ScaleCapacity(minimum=p.get('min_count'),
- maximum=p.get('max_count'),
- default=p.get('count')),
- rules=[create_rule_instance(r) for r in p.get('rules') or []],
- fixed_date=TimeWindow(time_zone=p.get('fixed_date_timezone'),
- start=p.get('fixed_date_start'),
- end=p.get('fixed_date_end')) if p.get('fixed_date_timezone') else None,
- recurrence=Recurrence(frequency=p.get('recurrence_frequency'),
- schedule=(RecurrentSchedule(time_zone=p.get('recurrence_timezone'),
- days=p.get('recurrence_days'),
- hours=p.get('recurrence_hours'),
- minutes=p.get('recurrence_mins'))))
- if p.get('recurrence_frequency') and p['recurrence_frequency'] != 'None' else None)
- for p in self.profiles or []]
-
- notifications = [AutoscaleNotification(email=EmailNotification(**n),
- webhooks=[WebhookNotification(service_uri=w) for w in n.get('webhooks') or []])
- for n in self.notifications or []]
-
- if not results:
- # create new
- changed = True
- else:
- # check changed
- resource_name = results.autoscale_setting_resource_name or self.name
- update_tags, tags = self.update_tags(results.tags)
- if update_tags:
- changed = True
- self.tags = tags
- if self.target != results.target_resource_uri:
- changed = True
- if self.enabled != results.enabled:
- changed = True
- profile_result_set = set([str(profile_to_dict(p)) for p in results.profiles or []])
- if profile_result_set != set([str(profile_to_dict(p)) for p in profiles]):
- changed = True
- notification_result_set = set([str(notification_to_dict(n)) for n in results.notifications or []])
- if notification_result_set != set([str(notification_to_dict(n)) for n in notifications]):
- changed = True
- if changed:
- # construct the instance that will be sent to the create_or_update API
- results = AutoscaleSettingResource(location=self.location,
- tags=self.tags,
- profiles=profiles,
- notifications=notifications,
- enabled=self.enabled,
- autoscale_setting_resource_name=resource_name,
- target_resource_uri=self.target)
- if not self.check_mode:
- results = self.create_or_update_auto_scale(results)
- # results should be the dict of the instance
- self.results = auto_scale_to_dict(results)
- self.results['changed'] = changed
- return self.results
-
- def get_auto_scale(self):
- try:
- return self.monitor_client.autoscale_settings.get(self.resource_group, self.name)
- except Exception as exc:
- self.log('Error: failed to get auto scale settings {0} - {1}'.format(self.name, str(exc)))
- return None
-
- def create_or_update_auto_scale(self, param):
- try:
- return self.monitor_client.autoscale_settings.create_or_update(self.resource_group, self.name, param)
- except Exception as exc:
- self.fail("Error creating auto scale settings {0} - {1}".format(self.name, str(exc)))
-
- def delete_auto_scale(self):
- self.log('Deleting auto scale settings {0}'.format(self.name))
- try:
- return self.monitor_client.autoscale_settings.delete(self.resource_group, self.name)
- except Exception as exc:
- self.fail("Error deleting auto scale settings {0} - {1}".format(self.name, str(exc)))
-
-
-def main():
- AzureRMAutoScale()
-
-
-if __name__ == '__main__':
- main()
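For readers tracing the idempotence check in exec_module above: profiles and notifications are serialized to plain dicts and compared as sets of their string representations, while time_grain, time_window and cooldown round-trip through timedelta_to_minutes. A minimal standalone sketch of that idea, with illustrative values and not part of the removed module:

# Minimal sketch, illustrative only: mirrors the change-detection approach in
# AzureRMAutoScale.exec_module, where profiles are serialized to dicts and
# compared as sets of their string representations.
from datetime import timedelta


def timedelta_to_minutes(time):
    # Same conversion as the helper above: days, seconds and microseconds
    # are all expressed in minutes.
    if not time:
        return 0
    return time.days * 1440 + time.seconds / 60.0 + time.microseconds / 60000000.0


existing = [{'name': 'Auto created scale condition', 'count': '1', 'min_count': '1', 'max_count': '1'}]
desired = [{'name': 'Auto created scale condition', 'count': '1', 'min_count': '1', 'max_count': '2'}]

changed = set(str(p) for p in existing) != set(str(p) for p in desired)
print(changed)                                     # True -> an update would be sent
print(timedelta_to_minutes(timedelta(minutes=5)))  # 5.0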
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_autoscale_info.py b/lib/ansible/modules/cloud/azure/azure_rm_autoscale_info.py
deleted file mode 100644
index 995556ac58..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_autoscale_info.py
+++ /dev/null
@@ -1,271 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_autoscale_info
-version_added: "2.9"
-short_description: Get Azure Auto Scale Setting facts
-description:
- - Get facts of Auto Scale Setting.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- name:
- description:
- - The name of the Auto Scale Setting.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of Auto Scale Setting
- azure_rm_autoscale_info:
- resource_group: myResourceGroup
- name: auto_scale_name
-
- - name: List instances of Auto Scale Setting
- azure_rm_autoscale_info:
- resource_group: myResourceGroup
-'''
-
-RETURN = '''
-autoscales:
- description: List of Azure Scale Settings dicts.
- returned: always
- type: list
- sample: [{
- "enabled": true,
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/microsoft.insights/autoscalesettings/scale",
- "location": "eastus",
- "name": "scale",
- "notifications": [
- {
- "custom_emails": [
- "yuwzho@microsoft.com"
- ],
- "send_to_subscription_administrator": true,
- "send_to_subscription_co_administrators": false,
- "webhooks": []
- }
- ],
- "profiles": [
- {
- "count": "1",
- "max_count": "1",
- "min_count": "1",
- "name": "Auto created scale condition 0",
- "recurrence_days": [
- "Monday"
- ],
- "recurrence_frequency": "Week",
- "recurrence_hours": [
- "6"
- ],
- "recurrence_mins": [
- "0"
- ],
- "recurrence_timezone": "China Standard Time",
- "rules": [
- {
- "cooldown": 5.0,
- "direction": "Increase",
- "metric_name": "Percentage CPU",
- "metric_resource_uri": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsof
- t.Compute/virtualMachineScaleSets/myVmss",
- "operator": "GreaterThan",
- "statistic": "Average",
- "threshold": 70.0,
- "time_aggregation": "Average",
- "time_grain": 1.0,
- "time_window": 10.0,
- "type": "ChangeCount",
- "value": "1"
- }
- ]
- }
- ],
- "target": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScale
- Sets/myVmss"
- }]
-
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils._text import to_native
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-# duplicated in azure_rm_autoscale
-def timedelta_to_minutes(time):
- if not time:
- return 0
- return time.days * 1440 + time.seconds / 60.0 + time.microseconds / 60000000.0
-
-
-def get_enum_value(item):
- if 'value' in dir(item):
- return to_native(item.value)
- return to_native(item)
-
-
-def auto_scale_to_dict(instance):
- if not instance:
- return dict()
- return dict(
- id=to_native(instance.id or ''),
- name=to_native(instance.name),
- location=to_native(instance.location),
- profiles=[profile_to_dict(p) for p in instance.profiles or []],
- notifications=[notification_to_dict(n) for n in instance.notifications or []],
- enabled=instance.enabled,
- target=to_native(instance.target_resource_uri),
- tags=instance.tags
- )
-
-
-def rule_to_dict(rule):
- if not rule:
- return dict()
- result = dict(metric_name=to_native(rule.metric_trigger.metric_name),
- metric_resource_uri=to_native(rule.metric_trigger.metric_resource_uri),
- time_grain=timedelta_to_minutes(rule.metric_trigger.time_grain),
- statistic=get_enum_value(rule.metric_trigger.statistic),
- time_window=timedelta_to_minutes(rule.metric_trigger.time_window),
- time_aggregation=get_enum_value(rule.metric_trigger.time_aggregation),
- operator=get_enum_value(rule.metric_trigger.operator),
- threshold=float(rule.metric_trigger.threshold))
- if rule.scale_action and to_native(rule.scale_action.direction) != 'None':
- result['direction'] = get_enum_value(rule.scale_action.direction)
- result['type'] = get_enum_value(rule.scale_action.type)
- result['value'] = to_native(rule.scale_action.value)
- result['cooldown'] = timedelta_to_minutes(rule.scale_action.cooldown)
- return result
-
-
-def profile_to_dict(profile):
- if not profile:
- return dict()
- result = dict(name=to_native(profile.name),
- count=to_native(profile.capacity.default),
- max_count=to_native(profile.capacity.maximum),
- min_count=to_native(profile.capacity.minimum))
-
- if profile.rules:
- result['rules'] = [rule_to_dict(r) for r in profile.rules]
- if profile.fixed_date:
- result['fixed_date_timezone'] = profile.fixed_date.time_zone
- result['fixed_date_start'] = profile.fixed_date.start
- result['fixed_date_end'] = profile.fixed_date.end
- if profile.recurrence:
- if get_enum_value(profile.recurrence.frequency) != 'None':
- result['recurrence_frequency'] = get_enum_value(profile.recurrence.frequency)
- if profile.recurrence.schedule:
- result['recurrence_timezone'] = to_native(str(profile.recurrence.schedule.time_zone))
- result['recurrence_days'] = [to_native(r) for r in profile.recurrence.schedule.days]
- result['recurrence_hours'] = [to_native(r) for r in profile.recurrence.schedule.hours]
- result['recurrence_mins'] = [to_native(r) for r in profile.recurrence.schedule.minutes]
- return result
-
-
-def notification_to_dict(notification):
- if not notification:
- return dict()
- return dict(send_to_subscription_administrator=notification.email.send_to_subscription_administrator if notification.email else False,
- send_to_subscription_co_administrators=notification.email.send_to_subscription_co_administrators if notification.email else False,
- custom_emails=[to_native(e) for e in notification.email.custom_emails or []],
- webhooks=[to_native(w.service_url) for w in notification.webhooks or []])
-
-
-class AzureRMAutoScaleInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- )
- )
- # store the results of the module operation
- self.results = dict()
- self.resource_group = None
- self.name = None
- self.tags = None
-
- super(AzureRMAutoScaleInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_autoscale_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_autoscale_facts' module has been renamed to 'azure_rm_autoscale_info'", version='2.13')
-
- for key in list(self.module_arg_spec):
- setattr(self, key, kwargs[key])
-
- if self.resource_group and self.name:
- self.results['autoscales'] = self.get()
- elif self.resource_group:
- self.results['autoscales'] = self.list_by_resource_group()
- return self.results
-
- def get(self):
- result = []
- try:
- instance = self.monitor_client.autoscale_settings.get(self.resource_group, self.name)
- result = [auto_scale_to_dict(instance)]
- except Exception as ex:
- self.log('Could not get facts for autoscale {0} - {1}.'.format(self.name, str(ex)))
- return result
-
- def list_by_resource_group(self):
- results = []
- try:
- response = self.monitor_client.autoscale_settings.list_by_resource_group(self.resource_group)
- results = [auto_scale_to_dict(item) for item in response if self.has_tags(item.tags, self.tags)]
- except Exception as ex:
- self.log('Could not get facts for autoscale {0} - {1}.'.format(self.name, str(ex)))
- return results
-
-
-def main():
- AzureRMAutoScaleInfo()
-
-
-if __name__ == '__main__':
- main()
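The I(tags) option above limits results to resources carrying the given tags, formatted as 'key' or 'key:value'; the actual filtering is delegated to the shared has_tags helper in azure_rm_common. A rough standalone approximation of those semantics, an illustrative sketch rather than the real helper:

# Rough approximation of the tag-filter semantics ('key' or 'key:value'),
# illustrative only -- the real logic lives in AzureRMModuleBase.has_tags.
def matches_tag_filters(resource_tags, tag_filters):
    resource_tags = resource_tags or {}
    for entry in tag_filters or []:
        key, _, value = entry.partition(':')
        if key not in resource_tags:
            return False
        if value and resource_tags[key] != value:
            return False
    return True


print(matches_tag_filters({'env': 'sandbox'}, ['env']))          # True
print(matches_tag_filters({'env': 'sandbox'}, ['env:sandbox']))  # True
print(matches_tag_filters({'env': 'sandbox'}, ['env:prod']))     # False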
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_availabilityset.py b/lib/ansible/modules/cloud/azure/azure_rm_availabilityset.py
deleted file mode 100644
index 993478ee6b..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_availabilityset.py
+++ /dev/null
@@ -1,346 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Julien Stroheker, <juliens@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_availabilityset
-
-version_added: "2.4"
-
-short_description: Manage Azure Availability Set
-
-description:
- - Create, update and delete Azure Availability Set.
- An availability set cannot be updated; you will have to recreate one instead.
- The only supported update operation is for the tags.
-
-options:
- resource_group:
- description:
- - Name of a resource group where the availability set exists or will be created.
- required: true
- name:
- description:
- - Name of the availability set.
- required: true
- state:
- description:
- - Assert the state of the availability set.
- Use C(present) to create or update an availability set and C(absent) to delete an availability set.
- default: present
- choices:
- - absent
- - present
- location:
- description:
- - Valid Azure location. Defaults to location of the resource group.
- platform_update_domain_count:
- description:
- - Update domains indicate groups of virtual machines and underlying physical hardware that can be rebooted at the same time.
- type: int
- default: 5
- platform_fault_domain_count:
- description:
- - Fault domains define the group of virtual machines that share a common power source and network switch.
- - Should be between C(1) and C(3).
- type: int
- default: 3
- sku:
- description:
- Defines whether the availability set supports managed disks.
- default: Classic
- choices:
- - Classic
- - Aligned
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Julien Stroheker (@julienstroheker)
-'''
-
-EXAMPLES = '''
- - name: Create an availability set with default options
- azure_rm_availabilityset:
- name: myAvailabilitySet
- location: eastus
- resource_group: myResourceGroup
-
- - name: Create an availability set with advanced options
- azure_rm_availabilityset:
- name: myAvailabilitySet
- location: eastus
- resource_group: myResourceGroup
- platform_update_domain_count: 5
- platform_fault_domain_count: 3
- sku: Aligned
-
- - name: Delete an availability set
- azure_rm_availabilityset:
- name: myAvailabilitySet
- location: eastus
- resource_group: myResourceGroup
- state: absent
-'''
-
-RETURN = '''
-state:
- description: Current state of the availability set.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- type: str
- sample: "/subscriptions/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/availabilitySets/myavailabilityset2"
- location:
- description:
- - Location where the resource lives.
- type: str
- sample: eastus
- name:
- description:
- - Resource name.
- type: str
- sample: myavailabilityset2
- platform_fault_domain_count:
- description:
- - Fault domains values.
- type: int
- sample: 2
- platform_update_domain_count:
- description:
- - Update domains values.
- type: int
- sample: 5
- sku:
- description:
- The SKU of the availability set, indicating whether it supports managed disks.
- type: str
- sample: Aligned
- tags:
- description:
- - Resource tags.
- type: dict
- sample: {env: sandbox}
-
-changed:
- description: Whether or not the resource has changed.
- returned: always
- type: bool
- sample: true
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def availability_set_to_dict(avaset):
- '''
- Serializing the availability set from the API to Dict
- :return: dict
- '''
- return dict(
- id=avaset.id,
- name=avaset.name,
- location=avaset.location,
- platform_update_domain_count=avaset.platform_update_domain_count,
- platform_fault_domain_count=avaset.platform_fault_domain_count,
- tags=avaset.tags,
- sku=avaset.sku.name
- )
-
-
-class AzureRMAvailabilitySet(AzureRMModuleBase):
- """Configuration class for an Azure RM availability set resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- ),
- location=dict(
- type='str'
- ),
- platform_update_domain_count=dict(
- type='int',
- default=5
- ),
- platform_fault_domain_count=dict(
- type='int',
- default=3
- ),
- sku=dict(
- type='str',
- default='Classic',
- choices=['Classic', 'Aligned']
- )
- )
-
- self.resource_group = None
- self.name = None
- self.location = None
- self.tags = None
- self.platform_update_domain_count = None
- self.platform_fault_domain_count = None
- self.sku = None
- self.state = None
- self.warning = False
-
- self.results = dict(changed=False, state=dict())
-
- super(AzureRMAvailabilitySet, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- resource_group = None
- response = None
- to_be_updated = False
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- # Check if the AS already present in the RG
- if self.state == 'present':
- response = self.get_availabilityset()
- self.results['state'] = response
-
- if not response:
- to_be_updated = True
- else:
- update_tags, response['tags'] = self.update_tags(response['tags'])
-
- if update_tags:
- self.log("Tags has to be updated")
- to_be_updated = True
-
- if response['platform_update_domain_count'] != self.platform_update_domain_count:
- self.faildeploy('platform_update_domain_count')
-
- if response['platform_fault_domain_count'] != self.platform_fault_domain_count:
- self.faildeploy('platform_fault_domain_count')
-
- if response['sku'] != self.sku:
- self.faildeploy('sku')
-
- if self.check_mode:
- return self.results
-
- if to_be_updated:
- self.results['state'] = self.create_or_update_availabilityset()
- self.results['changed'] = True
-
- elif self.state == 'absent':
- self.delete_availabilityset()
- self.results['changed'] = True
-
- return self.results
-
- def faildeploy(self, param):
- '''
- Helper method to push a fail message to the console.
- Useful to notify users that they cannot change some values in an Availability Set.
-
- :param param: name of the impacted variable
- :return: void
- '''
- self.fail("You tried to change {0} but is was unsuccessful. An Availability Set is immutable, except tags".format(str(param)))
-
- def create_or_update_availabilityset(self):
- '''
- Method calling the Azure SDK to create or update the AS.
- :return: void
- '''
- self.log("Creating availabilityset {0}".format(self.name))
- try:
- params_sku = self.compute_models.Sku(
- name=self.sku
- )
- params = self.compute_models.AvailabilitySet(
- location=self.location,
- tags=self.tags,
- platform_update_domain_count=self.platform_update_domain_count,
- platform_fault_domain_count=self.platform_fault_domain_count,
- sku=params_sku
- )
- response = self.compute_client.availability_sets.create_or_update(self.resource_group, self.name, params)
- except CloudError as e:
- self.log('Error attempting to create the availability set.')
- self.fail("Error creating the availability set: {0}".format(str(e)))
-
- return availability_set_to_dict(response)
-
- def delete_availabilityset(self):
- '''
- Method calling the Azure SDK to delete the AS.
- :return: True on success
- '''
- self.log("Deleting availabilityset {0}".format(self.name))
- try:
- response = self.compute_client.availability_sets.delete(self.resource_group, self.name)
- except CloudError as e:
- self.log('Error attempting to delete the availability set.')
- self.fail("Error deleting the availability set: {0}".format(str(e)))
-
- return True
-
- def get_availabilityset(self):
- '''
- Method calling the Azure SDK to get an AS.
- :return: dict representing the availability set, or False if not found
- '''
- self.log("Checking if the availabilityset {0} is present".format(self.name))
- found = False
- try:
- response = self.compute_client.availability_sets.get(self.resource_group, self.name)
- found = True
- except CloudError as e:
- self.log('Did not find the Availability set.')
- if found is True:
- return availability_set_to_dict(response)
- else:
- return False
-
-
-def main():
- """Main execution"""
- AzureRMAvailabilitySet()
-
-
-if __name__ == '__main__':
- main()
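As the documentation above states, an availability set is immutable apart from its tags, so exec_module only ever updates tags and calls faildeploy for any other difference. A compact sketch of that comparison, with illustrative values only:

# Illustrative sketch of the immutability rule enforced in exec_module above:
# any difference outside of tags triggers faildeploy() instead of an update.
existing = {'platform_update_domain_count': 5, 'platform_fault_domain_count': 2, 'sku': 'Classic'}
desired = {'platform_update_domain_count': 5, 'platform_fault_domain_count': 3, 'sku': 'Aligned'}

immutable_changes = [key for key in desired if desired[key] != existing[key]]
print(immutable_changes)  # ['platform_fault_domain_count', 'sku'] -> the module would fail for each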
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_availabilityset_info.py b/lib/ansible/modules/cloud/azure/azure_rm_availabilityset_info.py
deleted file mode 100644
index df84730448..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_availabilityset_info.py
+++ /dev/null
@@ -1,216 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Julien Stroheker <juliens@microsoft.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_availabilityset_info
-
-version_added: "2.9"
-
-short_description: Get Azure Availability Set facts
-
-description:
- - Get facts for a specific availability set or all availability sets.
-
-options:
- name:
- description:
- - Limit results to a specific availability set.
- resource_group:
- description:
- - The resource group to search for the desired availability set.
- tags:
- description:
- - List of tags to be matched.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Julien Stroheker (@julienstroheker)
-'''
-
-EXAMPLES = '''
- - name: Get facts for one availability set
- azure_rm_availabilityset_info:
- name: Testing
- resource_group: myResourceGroup
-
- - name: Get facts for all availability sets in a specific resource group
- azure_rm_availabilityset_info:
- resource_group: myResourceGroup
-
-'''
-
-RETURN = '''
-azure_availabilityset:
- description: List of availability sets dicts.
- returned: always
- type: complex
- contains:
- location:
- description:
- - Location where the resource lives.
- type: str
- sample: eastus2
- name:
- description:
- - Resource name.
- type: str
- sample: myAvailabilitySet
- properties:
- description:
- - The properties of the resource.
- type: dict
- contains:
- platformFaultDomainCount:
- description:
- - Fault Domain count.
- type: int
- sample: 3
- platformUpdateDomainCount:
- description:
- - Update Domain count.
- type: int
- sample: 2
- virtualMachines:
- description:
- A list of references to all virtual machines in the availability set.
- type: list
- sample: []
- sku:
- description:
- The SKU of the availability set, indicating whether it supports managed disks.
- type: str
- sample: Aligned
- type:
- description:
- - Resource type.
- type: str
- sample: "Microsoft.Compute/availabilitySets"
- tags:
- description:
- - Resource tags.
- type: dict
- sample: { env: sandbox }
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except Exception:
- # handled in azure_rm_common
- pass
-
-AZURE_OBJECT_CLASS = 'AvailabilitySet'
-
-
-class AzureRMAvailabilitySetInfo(AzureRMModuleBase):
- """Utility class to get availability set facts"""
-
- def __init__(self):
-
- self.module_args = dict(
- name=dict(type='str'),
- resource_group=dict(type='str'),
- tags=dict(type='list')
- )
-
- self.results = dict(
- changed=False,
- ansible_info=dict(
- azure_availabilitysets=[]
- )
- )
-
- self.name = None
- self.resource_group = None
- self.tags = None
-
- super(AzureRMAvailabilitySetInfo, self).__init__(
- derived_arg_spec=self.module_args,
- supports_tags=False,
- facts_module=True
- )
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_availabilityset_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_availabilityset_facts' module has been renamed to 'azure_rm_availabilityset_info'", version='2.13')
-
- for key in self.module_args:
- setattr(self, key, kwargs[key])
-
- if self.name and not self.resource_group:
- self.fail("Parameter error: resource group required when filtering by name.")
- if self.name:
- self.results['ansible_info']['azure_availabilitysets'] = self.get_item()
- else:
- self.results['ansible_info']['azure_availabilitysets'] = self.list_items()
-
- return self.results
-
- def get_item(self):
- """Get a single availability set"""
-
- self.log('Get properties for {0}'.format(self.name))
-
- item = None
- result = []
-
- try:
- item = self.compute_client.availability_sets.get(self.resource_group, self.name)
- except CloudError:
- pass
-
- if item and self.has_tags(item.tags, self.tags):
- avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
- avase['name'] = item.name
- avase['type'] = item.type
- avase['sku'] = item.sku.name
- result = [avase]
-
- return result
-
- def list_items(self):
- """Get all availability sets"""
-
- self.log('List all availability sets')
-
- try:
- response = self.compute_client.availability_sets.list(self.resource_group)
- except CloudError as exc:
- self.fail('Failed to list all items - {0}'.format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
- avase['name'] = item.name
- avase['type'] = item.type
- avase['sku'] = item.sku.name
- results.append(avase)
-
- return results
-
-
-def main():
- """Main module execution code path"""
-
- AzureRMAvailabilitySetInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_azurefirewall.py b/lib/ansible/modules/cloud/azure/azure_rm_azurefirewall.py
deleted file mode 100644
index 51ff7f8429..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_azurefirewall.py
+++ /dev/null
@@ -1,729 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino), Jurijs Fadejevs (@needgithubid)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_azurefirewall
-version_added: '2.9'
-short_description: Manage Azure Firewall instance
-description:
- Create, update and delete an instance of Azure Firewall.
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: true
- type: str
- name:
- description:
- - The name of the Azure Firewall.
- required: true
- type: str
- location:
- description:
- - Resource location.
- type: str
- application_rule_collections:
- description:
- - Collection of application rule collections used by Azure Firewall.
- type: list
- suboptions:
- priority:
- description:
- - Priority of the application rule collection resource.
- type: int
- action:
- description:
- - The action type of a rule collection.
- choices:
- - allow
- - deny
- type: str
- rules:
- description:
- Collection of rules used by an application rule collection.
- type: list
- suboptions:
- name:
- description:
- - Name of the application rule.
- type: str
- description:
- description:
- - Description of the rule.
- type: str
- source_addresses:
- description:
- - List of source IP addresses for this rule.
- type: list
- protocols:
- description:
- - Array of ApplicationRuleProtocols.
- type: list
- target_fqdns:
- description:
- - List of FQDNs for this rule.
- type: list
- fqdn_tags:
- description:
- - List of FQDN Tags for this rule.
- type: list
- name:
- description:
- Name of the resource that is unique within a resource group.
- - This name can be used to access the resource.
- type: str
- nat_rule_collections:
- description:
- - Collection of NAT rule collections used by Azure Firewall.
- type: list
- suboptions:
- priority:
- description:
- - Priority of the NAT rule collection resource.
- type: int
- action:
- description:
- The action type of a NAT rule collection.
- choices:
- - snat
- - dnat
- type: str
- rules:
- description:
- - Collection of rules used by a NAT rule collection.
- type: list
- suboptions:
- name:
- description:
- - Name of the NAT rule.
- type: str
- description:
- description:
- - Description of the rule.
- type: str
- source_addresses:
- description:
- - List of source IP addresses for this rule.
- type: list
- destination_addresses:
- description:
- - List of destination IP addresses for this rule.
- type: list
- destination_ports:
- description:
- - List of destination ports.
- type: list
- protocols:
- description:
- - Array of AzureFirewallNetworkRuleProtocols applicable to this NAT rule.
- type: list
- translated_address:
- description:
- - The translated address for this NAT rule.
- type: str
- translated_port:
- description:
- - The translated port for this NAT rule.
- type: str
- name:
- description:
- Name of the resource that is unique within a resource group.
- - This name can be used to access the resource.
- type: str
- network_rule_collections:
- description:
- - Collection of network rule collections used by Azure Firewall.
- type: list
- suboptions:
- priority:
- description:
- - Priority of the network rule collection resource.
- type: int
- action:
- description:
- - The action type of a rule collection.
- type: str
- choices:
- - allow
- - deny
- rules:
- description:
- - Collection of rules used by a network rule collection.
- type: list
- suboptions:
- name:
- description:
- - Name of the network rule.
- type: str
- description:
- description:
- - Description of the rule.
- type: str
- protocols:
- description:
- - Array of AzureFirewallNetworkRuleProtocols.
- type: list
- source_addresses:
- description:
- - List of source IP addresses for this rule.
- type: list
- destination_addresses:
- description:
- - List of destination IP addresses.
- type: list
- destination_ports:
- description:
- - List of destination ports.
- type: list
- name:
- description:
- Name of the resource that is unique within a resource group.
- - This name can be used to access the resource.
- type: str
- ip_configurations:
- description:
- - IP configuration of the Azure Firewall resource.
- type: list
- suboptions:
- subnet:
- description:
- - Existing subnet.
- - It can be a string containing subnet resource ID.
- It can be a dictionary containing I(name), I(virtual_network_name) and optionally I(resource_group).
- type: raw
- public_ip_address:
- description:
- - Existing public IP address.
- - It can be a string containing resource ID.
- - It can be a string containing a name in current resource group.
- - It can be a dictionary containing I(name) and optionally I(resource_group).
- type: raw
- name:
- description:
- - Name of the resource that is unique within a resource group.
- - This name can be used to access the resource.
- type: str
- state:
- description:
- - Assert the state of the AzureFirewall.
- - Use C(present) to create or update an AzureFirewall and C(absent) to delete it.
- default: present
- type: str
- choices:
- - absent
- - present
-extends_documentation_fragment:
- - azure
- - azure_tags
-author:
- - Zim Kalinowski (@zikalino)
- - Jurijs Fadejevs (@needgithubid)
-
-'''
-
-EXAMPLES = '''
-- name: Create Azure Firewall
- azure_rm_azurefirewall:
- resource_group: myResourceGroup
- name: myAzureFirewall
- tags:
- key1: value1
- application_rule_collections:
- - priority: 110
- action:
- type: deny
- rules:
- - name: rule1
- description: Deny inbound rule
- source_addresses:
- - 216.58.216.164
- - 10.0.0.0/24
- protocols:
- - type: https
- port: '443'
- target_fqdns:
- - www.test.com
- name: apprulecoll
- nat_rule_collections:
- - priority: 112
- action:
- type: dnat
- rules:
- - name: DNAT-HTTPS-traffic
- description: D-NAT all outbound web traffic for inspection
- source_addresses:
- - '*'
- destination_addresses:
- - 1.2.3.4
- destination_ports:
- - '443'
- protocols:
- - tcp
- translated_address: 1.2.3.5
- translated_port: '8443'
- name: natrulecoll
- network_rule_collections:
- - priority: 112
- action:
- type: deny
- rules:
- - name: L4-traffic
- description: Block traffic based on source IPs and ports
- protocols:
- - tcp
- source_addresses:
- - 192.168.1.1-192.168.1.12
- - 10.1.4.12-10.1.4.255
- destination_addresses:
- - '*'
- destination_ports:
- - 443-444
- - '8443'
- name: netrulecoll
- ip_configurations:
- - subnet: >-
- /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup
- /providers/Microsoft.Network/virtualNetworks/myVirtualNetwork
- /subnets/AzureFirewallSubnet
- public_ip_address: >-
- /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup
- /providers/Microsoft.Network/publicIPAddresses/
- myPublicIpAddress
- name: azureFirewallIpConfiguration
-- name: Delete Azure Firewall
- azure_rm_azurefirewall:
- resource_group: myResourceGroup
- name: myAzureFirewall
- state: absent
-
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/azureFirewalls/myAzureFirewall
-'''
-
-import time
-import json
-import re
-from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
-from ansible.module_utils.azure_rm_common_rest import GenericRestClient
-from copy import deepcopy
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMAzureFirewalls(AzureRMModuleBaseExt):
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- disposition='resource_group_name',
- required=True
- ),
- name=dict(
- type='str',
- disposition='azure_firewall_name',
- required=True
- ),
- location=dict(
- type='str',
- updatable=False,
- disposition='/',
- comparison='location'
- ),
- application_rule_collections=dict(
- type='list',
- disposition='/properties/applicationRuleCollections',
- options=dict(
- priority=dict(
- type='int',
- disposition='properties/*'
- ),
- action=dict(
- type='str',
- choices=['allow',
- 'deny'],
- disposition='properties/action/type',
- pattern='camelize'
- ),
- rules=dict(
- type='list',
- disposition='properties/*',
- options=dict(
- name=dict(
- type='str'
- ),
- description=dict(
- type='str'
- ),
- source_addresses=dict(
- type='list',
- disposition='sourceAddresses'
- ),
- protocols=dict(
- type='list',
- options=dict(
- type=dict(
- type='str',
- disposition='protocolType'
- ),
- port=dict(
- type='str'
- )
- )
- ),
- target_fqdns=dict(
- type='list',
- disposition='targetFqdns'
- ),
- fqdn_tags=dict(
- type='list',
- disposition='fqdnTags'
- )
- )
- ),
- name=dict(
- type='str'
- )
- )
- ),
- nat_rule_collections=dict(
- type='list',
- disposition='/properties/natRuleCollections',
- options=dict(
- priority=dict(
- type='int',
- disposition='properties/*'
- ),
- action=dict(
- type='str',
- disposition='properties/action/type',
- choices=['snat',
- 'dnat'],
- pattern='camelize'
- ),
- rules=dict(
- type='list',
- disposition='properties/*',
- options=dict(
- name=dict(
- type='str'
- ),
- description=dict(
- type='str'
- ),
- source_addresses=dict(
- type='list',
- disposition='sourceAddresses'
- ),
- destination_addresses=dict(
- type='list',
- disposition='destinationAddresses'
- ),
- destination_ports=dict(
- type='list',
- disposition='destinationPorts'
- ),
- protocols=dict(
- type='list'
- ),
- translated_address=dict(
- type='str',
- disposition='translatedAddress'
- ),
- translated_port=dict(
- type='str',
- disposition='translatedPort'
- )
- )
- ),
- name=dict(
- type='str'
- )
- )
- ),
- network_rule_collections=dict(
- type='list',
- disposition='/properties/networkRuleCollections',
- options=dict(
- priority=dict(
- type='int',
- disposition='properties/*'
- ),
- action=dict(
- type='str',
- choices=['allow',
- 'deny'],
- disposition='properties/action/type',
- pattern='camelize'
- ),
- rules=dict(
- type='list',
- disposition='properties/*',
- options=dict(
- name=dict(
- type='str'
- ),
- description=dict(
- type='str'
- ),
- protocols=dict(
- type='list'
- ),
- source_addresses=dict(
- type='list',
- disposition='sourceAddresses'
- ),
- destination_addresses=dict(
- type='list',
- disposition='destinationAddresses'
- ),
- destination_ports=dict(
- type='list',
- disposition='destinationPorts'
- )
- )
- ),
- name=dict(
- type='str'
- )
- )
- ),
- ip_configurations=dict(
- type='list',
- disposition='/properties/ipConfigurations',
- options=dict(
- subnet=dict(
- type='raw',
- disposition='properties/subnet/id',
- pattern=('/subscriptions/{subscription_id}/resourceGroups'
- '/{resource_group}/providers/Microsoft.Network'
- '/virtualNetworks/{virtual_network_name}/subnets'
- '/{name}')
- ),
- public_ip_address=dict(
- type='raw',
- disposition='properties/publicIPAddress/id',
- pattern=('/subscriptions/{subscription_id}/resourceGroups'
- '/{resource_group}/providers/Microsoft.Network'
- '/publicIPAddresses/{name}')
- ),
- name=dict(
- type='str'
- )
- )
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.name = None
- self.body = {}
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.url = None
- self.status_code = [200, 201, 202]
- self.to_do = Actions.NoAction
-
- self.body = {}
- self.query_parameters = {}
- self.query_parameters['api-version'] = '2018-11-01'
- self.header_parameters = {}
- self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
-
- super(AzureRMAzureFirewalls, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- self.body[key] = kwargs[key]
-
- self.inflate_parameters(self.module_arg_spec, self.body, 0)
-
- old_response = None
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- resource_group = self.get_resource_group(self.resource_group)
-
- if 'location' not in self.body:
- self.body['location'] = resource_group.location
-
- self.url = ('/subscriptions' +
- '/' + self.subscription_id +
- '/resourceGroups' +
- '/' + self.resource_group +
- '/providers' +
- '/Microsoft.Network' +
- '/azureFirewalls' +
- '/' + self.name)
-
- old_response = self.get_resource()
-
- if not old_response:
- self.log("AzureFirewall instance doesn't exist")
-
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log('AzureFirewall instance already exists')
-
- if self.state == 'absent':
- self.to_do = Actions.Delete
- else:
- modifiers = {}
- self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
- self.results['modifiers'] = modifiers
- self.results['compare'] = []
- if not self.default_compare(modifiers, self.body, old_response, '', self.results):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log('Need to Create / Update the AzureFirewall instance')
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_resource()
-
- # if not old_response:
- self.results['changed'] = True
- # else:
- # self.results['changed'] = old_response.__ne__(response)
- self.log('Creation / Update done')
- elif self.to_do == Actions.Delete:
- self.log('AzureFirewall instance deleted')
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_resource()
-
- # make sure the instance is actually deleted; for some Azure resources the instance hangs around
- # for a while after deletion -- this should really be fixed in Azure
- while self.get_resource():
- time.sleep(20)
- else:
- self.log('AzureFirewall instance unchanged')
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
- while response['properties']['provisioningState'] == 'Updating':
- time.sleep(30)
- response = self.get_resource()
-
- return self.results
-
- def create_update_resource(self):
- # self.log('Creating / Updating the AzureFirewall instance {0}'.format(self.))
-
- try:
- response = self.mgmt_client.query(self.url,
- 'PUT',
- self.query_parameters,
- self.header_parameters,
- self.body,
- self.status_code,
- 600,
- 30)
- except CloudError as exc:
- self.log('Error attempting to create the AzureFirewall instance.')
- self.fail('Error creating the AzureFirewall instance: {0}'.format(str(exc)))
-
- try:
- response = json.loads(response.text)
- except Exception:
- response = {'text': response.text}
-
- return response
-
- def delete_resource(self):
- # self.log('Deleting the AzureFirewall instance {0}'.format(self.))
- try:
- response = self.mgmt_client.query(self.url,
- 'DELETE',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- except CloudError as e:
- self.log('Error attempting to delete the AzureFirewall instance.')
- self.fail('Error deleting the AzureFirewall instance: {0}'.format(str(e)))
-
- return True
-
- def get_resource(self):
- # self.log('Checking if the AzureFirewall instance {0} is present'.format(self.))
- found = False
- try:
- response = self.mgmt_client.query(self.url,
- 'GET',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- response = json.loads(response.text)
- found = True
- self.log("Response : {0}".format(response))
- # self.log("AzureFirewall instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the AzureFirewall instance.')
- if found is True:
- return response
-
- return False
-
-
-def main():
- AzureRMAzureFirewalls()
-
-
-if __name__ == '__main__':
- main()
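The module above drives Azure Resource Manager directly through GenericRestClient: it composes the firewall's resource ID, issues PUT/GET/DELETE requests against it, and then polls until provisioningState leaves 'Updating'. Below is a minimal sketch of that URL pattern and poll loop; the helper names are illustrative and not part of the original file.

import time

def firewall_url(subscription_id, resource_group, name):
    # Same ARM resource ID the module assembles in exec_module()
    return ('/subscriptions/{0}/resourceGroups/{1}'
            '/providers/Microsoft.Network/azureFirewalls/{2}').format(
                subscription_id, resource_group, name)

def wait_for_provisioning(get_resource, interval=30):
    # get_resource() mirrors self.get_resource(): a GET returning the parsed
    # JSON body, or False when the firewall does not exist.
    response = get_resource()
    while response and response['properties']['provisioningState'] == 'Updating':
        time.sleep(interval)
        response = get_resource()
    return response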
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_azurefirewall_info.py b/lib/ansible/modules/cloud/azure/azure_rm_azurefirewall_info.py
deleted file mode 100644
index d7c959e3dc..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_azurefirewall_info.py
+++ /dev/null
@@ -1,275 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Liu Qingyi, (@smile37773)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_azurefirewall_info
-version_added: '2.9'
-short_description: Get AzureFirewall info
-description:
- - Get info of AzureFirewall.
-options:
- resource_group:
- description:
- - The name of the resource group.
- type: str
- name:
- description:
- - Resource name.
- type: str
-extends_documentation_fragment:
- - azure
-author:
- - Liu Qingyi (@smile37773)
-
-'''
-
-EXAMPLES = '''
-- name: List all Azure Firewalls for a given subscription
- azure_rm_azurefirewall_info:
-- name: List all Azure Firewalls for a given resource group
- azure_rm_azurefirewall_info:
- resource_group: myResourceGroup
-- name: Get Azure Firewall
- azure_rm_azurefirewall_info:
- resource_group: myResourceGroup
- name: myAzureFirewall
-
-'''
-
-RETURN = '''
-firewalls:
- description:
- - A list of dict results where the key is the name of the AzureFirewall and the values are the facts for that AzureFirewall.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/
- myResourceGroup/providers/Microsoft.Network/azureFirewalls/myAzureFirewall"
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: "myAzureFirewall"
- location:
- description:
- - Resource location.
- returned: always
- type: str
- sample: "eastus"
- tags:
- description:
- - Resource tags.
- returned: always
- type: dict
- sample: { "tag": "value" }
- etag:
- description:
- - Gets a unique read-only string that changes whenever the resource is updated.
- returned: always
- type: str
- sample: "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
- nat_rule_collections:
- description:
- - Collection of NAT rule collections used by Azure Firewall.
- type: list
- network_rule_collections:
- description:
- - Collection of network rule collections used by Azure Firewall.
- type: list
- ip_configurations:
- description:
- - IP configuration of the Azure Firewall resource.
- type: list
- provisioning_state:
- description:
-            - The current provisioning state of the Azure Firewall resource.
- type: str
- sample: "Succeeded"
-
-'''
-
-import time
-import json
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.azure_rm_common_rest import GenericRestClient
-from copy import deepcopy
-try:
- from msrestazure.azure_exceptions import CloudError
-except Exception:
- # handled in azure_rm_common
- pass
-
-
-class AzureRMAzureFirewallsInfo(AzureRMModuleBase):
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str'
- ),
- name=dict(
- type='str'
- )
- )
-
- self.resource_group = None
- self.name = None
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.url = None
- self.status_code = [200]
-
- self.query_parameters = {}
- self.query_parameters['api-version'] = '2018-11-01'
- self.header_parameters = {}
- self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
-
- self.mgmt_client = None
- super(AzureRMAzureFirewallsInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if (self.resource_group is not None and self.name is not None):
- self.results['firewalls'] = self.get()
- elif (self.resource_group is not None):
- self.results['firewalls'] = self.list()
- else:
- self.results['firewalls'] = self.listall()
- return self.results
-
- def get(self):
- response = None
- results = {}
- # prepare url
- self.url = ('/subscriptions' +
- '/{{ subscription_id }}' +
- '/resourceGroups' +
- '/{{ resource_group }}' +
- '/providers' +
- '/Microsoft.Network' +
- '/azureFirewalls' +
- '/{{ azure_firewall_name }}')
- self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
- self.url = self.url.replace('{{ resource_group }}', self.resource_group)
- self.url = self.url.replace('{{ azure_firewall_name }}', self.name)
-
- try:
- response = self.mgmt_client.query(self.url,
- 'GET',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- results = json.loads(response.text)
- # self.log('Response : {0}'.format(response))
- except CloudError as e:
-            self.log('Could not get info for the AzureFirewall instance.')
-
- return self.format_item(results)
-
- def list(self):
- response = None
- results = {}
- # prepare url
- self.url = ('/subscriptions' +
- '/{{ subscription_id }}' +
- '/resourceGroups' +
- '/{{ resource_group }}' +
- '/providers' +
- '/Microsoft.Network' +
- '/azureFirewalls')
- self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
- self.url = self.url.replace('{{ resource_group }}', self.resource_group)
-
- try:
- response = self.mgmt_client.query(self.url,
- 'GET',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- results = json.loads(response.text)
- # self.log('Response : {0}'.format(response))
- except CloudError as e:
-            self.log('Could not get info for the AzureFirewall instance.')
-
-        return [self.format_item(x) for x in results.get('value', [])]
-
- def listall(self):
- response = None
- results = {}
- # prepare url
- self.url = ('/subscriptions' +
- '/{{ subscription_id }}' +
- '/providers' +
- '/Microsoft.Network' +
- '/azureFirewalls')
- self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
-
- try:
- response = self.mgmt_client.query(self.url,
- 'GET',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- results = json.loads(response.text)
- # self.log('Response : {0}'.format(response))
- except CloudError as e:
-            self.log('Could not get info for the AzureFirewall instance.')
-
-        return [self.format_item(x) for x in results.get('value', [])]
-
- def format_item(self, item):
- d = {
- 'id': item['id'],
- 'name': item['name'],
- 'location': item['location'],
- 'etag': item['etag'],
- 'tags': item.get('tags'),
- 'nat_rule_collections': item['properties']['natRuleCollections'],
- 'network_rule_collections': item['properties']['networkRuleCollections'],
- 'ip_configurations': item['properties']['ipConfigurations'],
- 'provisioning_state': item['properties']['provisioningState']
- }
- return d
-
-
-def main():
- AzureRMAzureFirewallsInfo()
-
-
-if __name__ == '__main__':
- main()
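For reference, format_item() above flattens the raw ARM GET payload into the snake_case dictionary returned under results['firewalls']. A hedged illustration with made-up sample values:

raw = {
    'id': '/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup'
          '/providers/Microsoft.Network/azureFirewalls/myAzureFirewall',
    'name': 'myAzureFirewall',
    'location': 'eastus',
    'etag': 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx',
    'tags': {'tag': 'value'},
    'properties': {
        'natRuleCollections': [],
        'networkRuleCollections': [],
        'ipConfigurations': [],
        'provisioningState': 'Succeeded',
    },
}
# format_item(raw) returns:
# {'id': raw['id'], 'name': 'myAzureFirewall', 'location': 'eastus',
#  'etag': raw['etag'], 'tags': {'tag': 'value'},
#  'nat_rule_collections': [], 'network_rule_collections': [],
#  'ip_configurations': [], 'provisioning_state': 'Succeeded'}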
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_batchaccount.py b/lib/ansible/modules/cloud/azure/azure_rm_batchaccount.py
deleted file mode 100644
index 74ab717d83..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_batchaccount.py
+++ /dev/null
@@ -1,341 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (C) 2019 Junyi Yi (@JunyiYi)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-# ----------------------------------------------------------------------------
-#
-# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
-#
-# ----------------------------------------------------------------------------
-#
-# This file is automatically generated by Magic Modules and manual
-# changes will be clobbered when the file is regenerated.
-#
-#
-# ----------------------------------------------------------------------------
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_batchaccount
-version_added: "2.9"
-short_description: Manages a Batch Account on Azure
-description:
- - Create, update and delete instance of Azure Batch Account.
-
-options:
- resource_group:
- description:
- - The name of the resource group in which to create the Batch Account.
- required: true
- type: str
- name:
- description:
- - The name of the Batch Account.
- required: true
- type: str
- location:
- description:
- - Specifies the supported Azure location where the resource exists.
- type: str
- auto_storage_account:
- description:
- - Existing storage account with which to associate the Batch Account.
- - It can be the storage account name which is in the same resource group.
-            - It can be the storage account ID. For example "/subscriptions/{subscription_id}/resourceGroups/
- {resource_group}/providers/Microsoft.Storage/storageAccounts/{name}".
- - It can be a dict which contains I(name) and I(resource_group) of the storage account.
- key_vault:
- description:
- - Existing key vault with which to associate the Batch Account.
- - It can be the key vault name which is in the same resource group.
- - It can be the key vault ID. For example "/subscriptions/{subscription_id}/resourceGroups/
- {resource_group}/providers/Microsoft.KeyVault/vaults/{name}".
- - It can be a dict which contains I(name) and I(resource_group) of the key vault.
- pool_allocation_mode:
- description:
-            - The pool allocation mode of the Batch Account.
- default: batch_service
- choices:
- - batch_service
- - user_subscription
- type: str
- state:
- description:
- - Assert the state of the Batch Account.
- - Use C(present) to create or update a Batch Account and C(absent) to delete it.
- default: present
- type: str
- choices:
- - present
- - absent
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Junyi Yi (@JunyiYi)
-'''
-
-EXAMPLES = '''
- - name: Create Batch Account
- azure_rm_batchaccount:
- resource_group: MyResGroup
- name: mybatchaccount
- location: eastus
- auto_storage_account:
- name: mystorageaccountname
- pool_allocation_mode: batch_service
-'''
-
-RETURN = '''
-id:
- description:
- - The ID of the Batch account.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Batch/batchAccounts/sampleacct"
-account_endpoint:
- description:
- - The account endpoint used to interact with the Batch service.
- returned: always
- type: str
- sample: sampleacct.westus.batch.azure.com
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import normalize_location_name
-from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
-from ansible.module_utils.common.dict_transformations import _snake_to_camel
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrestazure.azure_operation import AzureOperationPoller
- from msrest.serialization import Model
- from azure.mgmt.batch import BatchManagementClient
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMBatchAccount(AzureRMModuleBaseExt):
- """Configuration class for an Azure RM Batch Account resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- required=True,
- type='str'
- ),
- name=dict(
- required=True,
- type='str'
- ),
- location=dict(
- type='str',
- updatable=False,
- disposition='/'
- ),
- auto_storage_account=dict(
- type='raw'
- ),
- key_vault=dict(
- type='raw',
- updatable=False,
- disposition='/'
- ),
- pool_allocation_mode=dict(
- default='batch_service',
- type='str',
- choices=['batch_service', 'user_subscription'],
- updatable=False,
- disposition='/'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.name = None
- self.batch_account = dict()
- self.tags = None
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMBatchAccount, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- self.batch_account[key] = kwargs[key]
-
- resource_group = self.get_resource_group(self.resource_group)
- if self.batch_account.get('location') is None:
- self.batch_account['location'] = resource_group.location
- if self.batch_account.get('auto_storage_account') is not None:
- self.batch_account['auto_storage'] = {
- 'storage_account_id': self.normalize_resource_id(
- self.batch_account.pop('auto_storage_account'),
- '/subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/Microsoft.Storage/storageAccounts/{name}')
- }
- if self.batch_account.get('key_vault') is not None:
- id = self.normalize_resource_id(
- self.batch_account.pop('key_vault'),
- '/subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/Microsoft.KeyVault/vaults/{name}')
- url = 'https://' + id.split('/').pop() + '.vault.azure.net/'
- self.batch_account['key_vault_reference'] = {
- 'id': id,
- 'url': url
- }
- self.batch_account['pool_allocation_mode'] = _snake_to_camel(self.batch_account['pool_allocation_mode'], True)
-
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(BatchManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- old_response = self.get_batchaccount()
-
- if not old_response:
- self.log("Batch Account instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("Batch Account instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.results['old'] = old_response
- self.results['new'] = self.batch_account
- if not self.idempotency_check(old_response, self.batch_account):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the Batch Account instance")
-
- self.results['changed'] = True
- if self.check_mode:
- return self.results
-
- response = self.create_update_batchaccount()
-
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("Batch Account instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_batchaccount()
- else:
- self.log("Batch Account instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if self.state == 'present':
- self.results.update({
- 'id': response.get('id', None),
- 'account_endpoint': response.get('account_endpoint', None)
- })
- return self.results
-
- def create_update_batchaccount(self):
- '''
- Creates or updates Batch Account with the specified configuration.
-
- :return: deserialized Batch Account instance state dictionary
- '''
- self.log("Creating / Updating the Batch Account instance {0}".format(self.name))
-
- try:
- if self.to_do == Actions.Create:
- response = self.mgmt_client.batch_account.create(resource_group_name=self.resource_group,
- account_name=self.name,
- parameters=self.batch_account)
- else:
- response = self.mgmt_client.batch_account.update(resource_group_name=self.resource_group,
- account_name=self.name,
- tags=self.tags,
- auto_storage=self.batch_account.get('auto_storage'))
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
- except CloudError as exc:
- self.log('Error attempting to create the Batch Account instance.')
- self.fail("Error creating the Batch Account instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_batchaccount(self):
- '''
- Deletes specified Batch Account instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the Batch Account instance {0}".format(self.name))
- try:
- response = self.mgmt_client.batch_account.delete(resource_group_name=self.resource_group,
- account_name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the Batch Account instance.')
- self.fail("Error deleting the Batch Account instance: {0}".format(str(e)))
-
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
- return True
-
- def get_batchaccount(self):
- '''
- Gets the properties of the specified Batch Account
- :return: deserialized Batch Account instance state dictionary
- '''
- self.log("Checking if the Batch Account instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mgmt_client.batch_account.get(resource_group_name=self.resource_group,
- account_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("Batch Account instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the Batch Account instance.')
- if found is True:
- return response.as_dict()
- return False
-
-
-def main():
- """Main execution"""
- AzureRMBatchAccount()
-
-
-if __name__ == '__main__':
- main()
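The auto_storage_account and key_vault options above accept a bare name, a full resource ID, or a dict with name and resource_group; exec_module() normalizes all three to a full ARM ID before calling the Batch SDK. A minimal sketch of the key vault case, with illustrative values (the helper below is hypothetical and not part of the module):

def to_key_vault_reference(subscription_id, resource_group, name):
    # Mirrors the normalization done in exec_module(): expand to a full ARM ID,
    # then derive the vault URL from the last ID segment.
    vault_id = ('/subscriptions/{0}/resourceGroups/{1}'
                '/providers/Microsoft.KeyVault/vaults/{2}').format(
                    subscription_id, resource_group, name)
    return {'id': vault_id,
            'url': 'https://' + vault_id.split('/').pop() + '.vault.azure.net/'}

# to_key_vault_reference('xxxx', 'MyResGroup', 'myvault')['url']
# -> 'https://myvault.vault.azure.net/'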
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint.py b/lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint.py
deleted file mode 100644
index a2db425d61..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint.py
+++ /dev/null
@@ -1,666 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Hai Cao, <t-haicao@microsoft.com>, Yunge Zhu <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_cdnendpoint
-version_added: "2.8"
-short_description: Manage an Azure CDN endpoint
-description:
-    - Create, update, start, stop and delete an Azure CDN endpoint.
-
-options:
- resource_group:
- description:
- - Name of a resource group where the Azure CDN endpoint exists or will be created.
- required: true
- name:
- description:
- - Name of the Azure CDN endpoint.
- required: true
- location:
- description:
- - Valid azure location. Defaults to location of the resource group.
- started:
- description:
- - Use with I(state=present) to start the endpoint.
- type: bool
- purge:
- description:
- - Use with I(state=present) to purge the endpoint.
- type: bool
- default: false
- purge_content_paths:
- description:
- - Use with I(state=present) and I(purge=true) to specify content paths to be purged.
- type: list
- default: ['/']
- profile_name:
- description:
- - Name of the CDN profile where the endpoint attached to.
- required: true
- origins:
- description:
- - Set of source of the content being delivered via CDN.
- suboptions:
- name:
- description:
- - Origin name.
- required: true
- host_name:
- description:
- - The address of the origin.
- - It can be a domain name, IPv4 address, or IPv6 address.
- required: true
- http_port:
- description:
- - The value of the HTTP port. Must be between C(1) and C(65535).
- type: int
- https_port:
- description:
- - The value of the HTTPS port. Must be between C(1) and C(65535).
- type: int
- required: true
- origin_host_header:
- description:
- - The host header value sent to the origin with each request.
- type: str
- origin_path:
- description:
- - A directory path on the origin that CDN can use to retrieve content from.
- - E.g. contoso.cloudapp.net/originpath.
- type: str
- content_types_to_compress:
- description:
- - List of content types on which compression applies.
- - This value should be a valid MIME type.
- type: list
- is_compression_enabled:
- description:
- - Indicates whether content compression is enabled on CDN.
- type: bool
- default: false
- is_http_allowed:
- description:
- - Indicates whether HTTP traffic is allowed on the endpoint.
- type: bool
- default: true
- is_https_allowed:
- description:
- - Indicates whether HTTPS traffic is allowed on the endpoint.
- type: bool
- default: true
- query_string_caching_behavior:
- description:
- - Defines how CDN caches requests that include query strings.
- type: str
- choices:
- - ignore_query_string
- - bypass_caching
- - use_query_string
- - not_set
- default: ignore_query_string
- state:
- description:
-            - Assert the state of the Azure CDN endpoint. Use C(present) to create or update an Azure CDN endpoint and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yunge Zhu (@yungezz)
-'''
-
-EXAMPLES = '''
-    - name: Create an Azure CDN endpoint
- azure_rm_cdnendpoint:
- resource_group: myResourceGroup
- profile_name: myProfile
- name: myEndpoint
- origins:
- - name: TestOrig
- host_name: "www.example.com"
- tags:
- testing: testing
- delete: on-exit
- foo: bar
-    - name: Delete an Azure CDN endpoint
- azure_rm_cdnendpoint:
- resource_group: myResourceGroup
- profile_name: myProfile
- name: myEndpoint
- state: absent
-'''
-RETURN = '''
-state:
- description: Current state of the Azure CDN endpoint.
- returned: always
- type: str
-id:
- description:
- - Id of the CDN endpoint.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Cdn/profiles/myProfile/endpoints/
- myEndpoint"
-host_name:
- description:
- - Host name of the CDN endpoint.
- returned: always
- type: str
- sample: "myendpoint.azureedge.net"
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _snake_to_camel
-
-try:
- from azure.mgmt.cdn.models import Endpoint, DeepCreatedOrigin, EndpointUpdateParameters, QueryStringCachingBehavior, ErrorResponseException
- from azure.mgmt.cdn import CdnManagementClient
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def cdnendpoint_to_dict(cdnendpoint):
- return dict(
- id=cdnendpoint.id,
- name=cdnendpoint.name,
- type=cdnendpoint.type,
- location=cdnendpoint.location,
- tags=cdnendpoint.tags,
- origin_host_header=cdnendpoint.origin_host_header,
- origin_path=cdnendpoint.origin_path,
- content_types_to_compress=cdnendpoint.content_types_to_compress,
- is_compression_enabled=cdnendpoint.is_compression_enabled,
- is_http_allowed=cdnendpoint.is_http_allowed,
- is_https_allowed=cdnendpoint.is_https_allowed,
- query_string_caching_behavior=cdnendpoint.query_string_caching_behavior,
- optimization_type=cdnendpoint.optimization_type,
- probe_path=cdnendpoint.probe_path,
- geo_filters=[geo_filter_to_dict(geo_filter) for geo_filter in cdnendpoint.geo_filters] if cdnendpoint.geo_filters else None,
- host_name=cdnendpoint.host_name,
- origins=[deep_created_origin_to_dict(origin) for origin in cdnendpoint.origins] if cdnendpoint.origins else None,
- resource_state=cdnendpoint.resource_state,
- provisioning_state=cdnendpoint.provisioning_state
- )
-
-
-def deep_created_origin_to_dict(origin):
- return dict(
- name=origin.name,
- host_name=origin.host_name,
- http_port=origin.http_port,
- https_port=origin.https_port,
- )
-
-
-def geo_filter_to_dict(geo_filter):
- return dict(
- relative_path=geo_filter.relative_path,
- action=geo_filter.action,
- country_codes=geo_filter.country_codes,
- )
-
-
-def default_content_types():
- return ["text/plain",
- "text/html",
- "text/css",
- "text/javascript",
- "application/x-javascript",
- "application/javascript",
- "application/json",
- "application/xml"]
-
-
-origin_spec = dict(
- name=dict(
- type='str',
- required=True
- ),
- host_name=dict(
- type='str',
- required=True
- ),
- http_port=dict(
- type='int'
- ),
- https_port=dict(
- type='int'
- )
-)
-
-
-class AzureRMCdnendpoint(AzureRMModuleBase):
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- ),
- started=dict(
- type='bool'
- ),
- purge=dict(
- type='bool'
- ),
- purge_content_paths=dict(
- type='list',
- elements='str',
- default=['/']
- ),
- profile_name=dict(
- type='str',
- required=True
- ),
- origins=dict(
- type='list',
- elements='dict',
- options=origin_spec
- ),
- origin_host_header=dict(
- type='str',
- ),
- origin_path=dict(
- type='str',
- ),
- content_types_to_compress=dict(
- type='list',
- elements='str',
- ),
- is_compression_enabled=dict(
- type='bool',
- default=False
- ),
- is_http_allowed=dict(
- type='bool',
- default=True
- ),
- is_https_allowed=dict(
- type='bool',
- default=True
- ),
- query_string_caching_behavior=dict(
- type='str',
- choices=[
- 'ignore_query_string',
- 'bypass_caching',
- 'use_query_string',
- 'not_set'
- ],
- default='ignore_query_string'
- ),
- )
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.started = None
- self.purge = None
- self.purge_content_paths = None
- self.location = None
- self.profile_name = None
- self.origins = None
- self.tags = None
- self.origin_host_header = None
- self.origin_path = None
- self.content_types_to_compress = None
- self.is_compression_enabled = None
- self.is_http_allowed = None
- self.is_https_allowed = None
- self.query_string_caching_behavior = None
-
- self.cdn_client = None
-
- self.results = dict(changed=False)
-
- super(AzureRMCdnendpoint, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- self.cdn_client = self.get_cdn_client()
-
- to_be_updated = False
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- if self.query_string_caching_behavior:
- self.query_string_caching_behavior = _snake_to_camel(self.query_string_caching_behavior)
-
- response = self.get_cdnendpoint()
-
- if self.state == 'present':
-
- if not response:
-
- if self.started is None:
- # If endpoint doesn't exist and no start/stop operation specified, create endpoint.
- if self.origins is None:
- self.fail("Origins is not provided when trying to create endpoint")
- self.log("Need to create the Azure CDN endpoint")
-
- if not self.check_mode:
- result = self.create_cdnendpoint()
- self.results['id'] = result['id']
- self.results['host_name'] = result['host_name']
- self.log("Creation done")
-
- self.results['changed'] = True
- return self.results
-
- else:
-                    # Fail the module when the user tries to start/stop a non-existent endpoint
-                    self.log("Can't start/stop a non-existent endpoint")
-                    self.fail("This endpoint does not exist, so start/stop is not allowed")
-
- else:
- self.log('Results : {0}'.format(response))
- self.results['id'] = response['id']
- self.results['host_name'] = response['host_name']
-
- update_tags, response['tags'] = self.update_tags(response['tags'])
-
- if update_tags:
- to_be_updated = True
-
- if response['provisioning_state'] == "Succeeded":
- if self.started is False and response['resource_state'] == 'Running':
- self.log("Need to stop the Azure CDN endpoint")
-
- if not self.check_mode:
- result = self.stop_cdnendpoint()
- self.log("Endpoint stopped")
-
- self.results['changed'] = True
-
- elif self.started and response['resource_state'] == 'Stopped':
- self.log("Need to start the Azure CDN endpoint")
-
- if not self.check_mode:
- result = self.start_cdnendpoint()
- self.log("Endpoint started")
-
- self.results['changed'] = True
-
- elif self.started is not None:
- self.module.warn("Start/Stop not performed due to current resource state {0}".format(response['resource_state']))
- self.results['changed'] = False
-
- if self.purge:
- self.log("Need to purge endpoint")
-
- if not self.check_mode:
- result = self.purge_cdnendpoint()
- self.log("Endpoint purged")
-
- self.results['changed'] = True
-
- to_be_updated = to_be_updated or self.check_update(response)
-
- if to_be_updated:
- self.log("Need to update the Azure CDN endpoint")
- self.results['changed'] = True
-
- if not self.check_mode:
- result = self.update_cdnendpoint()
- self.results['host_name'] = result['host_name']
- self.log("Update done")
-
- elif self.started is not None:
- self.module.warn("Start/Stop not performed due to current provisioning state {0}".format(response['provisioning_state']))
- self.results['changed'] = False
-
- elif self.state == 'absent' and response:
- self.log("Need to delete the Azure CDN endpoint")
- self.results['changed'] = True
-
- if not self.check_mode:
- self.delete_cdnendpoint()
- self.log("Azure CDN endpoint deleted")
-
- return self.results
-
- def create_cdnendpoint(self):
- '''
-        Creates an Azure CDN endpoint.
-
- :return: deserialized Azure CDN endpoint instance state dictionary
- '''
- self.log("Creating the Azure CDN endpoint instance {0}".format(self.name))
-
- origins = []
- for item in self.origins:
- origins.append(
- DeepCreatedOrigin(name=item['name'],
- host_name=item['host_name'],
- http_port=item['http_port'] if 'http_port' in item else None,
- https_port=item['https_port'] if 'https_port' in item else None)
- )
-
- parameters = Endpoint(
- origins=origins,
- location=self.location,
- tags=self.tags,
- origin_host_header=self.origin_host_header,
- origin_path=self.origin_path,
- content_types_to_compress=default_content_types() if self.is_compression_enabled and not self.content_types_to_compress
- else self.content_types_to_compress,
- is_compression_enabled=self.is_compression_enabled if self.is_compression_enabled is not None else False,
- is_http_allowed=self.is_http_allowed if self.is_http_allowed is not None else True,
- is_https_allowed=self.is_https_allowed if self.is_https_allowed is not None else True,
- query_string_caching_behavior=self.query_string_caching_behavior if self.query_string_caching_behavior
- else QueryStringCachingBehavior.ignore_query_string
- )
-
- try:
- poller = self.cdn_client.endpoints.create(self.resource_group, self.profile_name, self.name, parameters)
- response = self.get_poller_result(poller)
- return cdnendpoint_to_dict(response)
- except ErrorResponseException as exc:
- self.log('Error attempting to create Azure CDN endpoint instance.')
- self.fail("Error creating Azure CDN endpoint instance: {0}".format(exc.message))
-
- def update_cdnendpoint(self):
- '''
-        Updates an Azure CDN endpoint.
-
- :return: deserialized Azure CDN endpoint instance state dictionary
- '''
- self.log("Updating the Azure CDN endpoint instance {0}".format(self.name))
-
- endpoint_update_properties = EndpointUpdateParameters(
- tags=self.tags,
- origin_host_header=self.origin_host_header,
- origin_path=self.origin_path,
- content_types_to_compress=default_content_types() if self.is_compression_enabled and not self.content_types_to_compress
- else self.content_types_to_compress,
- is_compression_enabled=self.is_compression_enabled,
- is_http_allowed=self.is_http_allowed,
- is_https_allowed=self.is_https_allowed,
- query_string_caching_behavior=self.query_string_caching_behavior,
- )
-
- try:
- poller = self.cdn_client.endpoints.update(self.resource_group, self.profile_name, self.name, endpoint_update_properties)
- response = self.get_poller_result(poller)
- return cdnendpoint_to_dict(response)
- except ErrorResponseException as exc:
- self.log('Error attempting to update Azure CDN endpoint instance.')
- self.fail("Error updating Azure CDN endpoint instance: {0}".format(exc.message))
-
- def delete_cdnendpoint(self):
- '''
- Deletes the specified Azure CDN endpoint in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the Azure CDN endpoint {0}".format(self.name))
- try:
- poller = self.cdn_client.endpoints.delete(
- self.resource_group, self.profile_name, self.name)
- self.get_poller_result(poller)
- return True
- except ErrorResponseException as e:
- self.log('Error attempting to delete the Azure CDN endpoint.')
- self.fail("Error deleting the Azure CDN endpoint: {0}".format(e.message))
- return False
-
- def get_cdnendpoint(self):
- '''
- Gets the properties of the specified Azure CDN endpoint.
-
- :return: deserialized Azure CDN endpoint state dictionary
- '''
- self.log(
- "Checking if the Azure CDN endpoint {0} is present".format(self.name))
- try:
- response = self.cdn_client.endpoints.get(self.resource_group, self.profile_name, self.name)
- self.log("Response : {0}".format(response))
- self.log("Azure CDN endpoint : {0} found".format(response.name))
- return cdnendpoint_to_dict(response)
- except ErrorResponseException:
- self.log('Did not find the Azure CDN endpoint.')
- return False
-
- def start_cdnendpoint(self):
- '''
- Starts an existing Azure CDN endpoint that is on a stopped state.
-
- :return: deserialized Azure CDN endpoint state dictionary
- '''
- self.log(
- "Starting the Azure CDN endpoint {0}".format(self.name))
- try:
- poller = self.cdn_client.endpoints.start(self.resource_group, self.profile_name, self.name)
- response = self.get_poller_result(poller)
- self.log("Response : {0}".format(response))
- self.log("Azure CDN endpoint : {0} started".format(response.name))
- return self.get_cdnendpoint()
- except ErrorResponseException:
-            self.log('Failed to start the Azure CDN endpoint.')
- return False
-
- def purge_cdnendpoint(self):
- '''
- Purges an existing Azure CDN endpoint.
-
- :return: deserialized Azure CDN endpoint state dictionary
- '''
- self.log(
- "Purging the Azure CDN endpoint {0}".format(self.name))
- try:
- poller = self.cdn_client.endpoints.purge_content(self.resource_group,
- self.profile_name,
- self.name,
- content_paths=self.purge_content_paths)
- response = self.get_poller_result(poller)
- self.log("Response : {0}".format(response))
- return self.get_cdnendpoint()
- except ErrorResponseException as e:
-            self.log('Failed to purge the Azure CDN endpoint.')
- return False
-
- def stop_cdnendpoint(self):
- '''
- Stops an existing Azure CDN endpoint that is on a running state.
-
- :return: deserialized Azure CDN endpoint state dictionary
- '''
- self.log(
- "Stopping the Azure CDN endpoint {0}".format(self.name))
- try:
- poller = self.cdn_client.endpoints.stop(self.resource_group, self.profile_name, self.name)
- response = self.get_poller_result(poller)
- self.log("Response : {0}".format(response))
- self.log("Azure CDN endpoint : {0} stopped".format(response.name))
- return self.get_cdnendpoint()
- except ErrorResponseException:
-            self.log('Failed to stop the Azure CDN endpoint.')
- return False
-
- def check_update(self, response):
-
- if self.origin_host_header and response['origin_host_header'] != self.origin_host_header:
- self.log("Origin host header Diff - Origin {0} / Update {1}".format(response['origin_host_header'], self.origin_host_header))
- return True
-
- if self.origin_path and response['origin_path'] != self.origin_path:
- self.log("Origin path Diff - Origin {0} / Update {1}".format(response['origin_path'], self.origin_path))
- return True
-
- if self.content_types_to_compress and response['content_types_to_compress'] != self.content_types_to_compress:
- self.log("Content types to compress Diff - Origin {0} / Update {1}".format(response['content_types_to_compress'], self.content_types_to_compress))
- return True
-
- if self.is_compression_enabled is not None and response['is_compression_enabled'] != self.is_compression_enabled:
- self.log("is_compression_enabled Diff - Origin {0} / Update {1}".format(response['is_compression_enabled'], self.is_compression_enabled))
- return True
-
- if self.is_http_allowed is not None and response['is_http_allowed'] != self.is_http_allowed:
- self.log("is_http_allowed Diff - Origin {0} / Update {1}".format(response['is_http_allowed'], self.is_http_allowed))
- return True
-
- if self.is_https_allowed is not None and response['is_https_allowed'] != self.is_https_allowed:
- self.log("is_https_allowed Diff - Origin {0} / Update {1}".format(response['is_https_allowed'], self.is_https_allowed))
- return True
-
- if self.query_string_caching_behavior and \
- _snake_to_camel(response['query_string_caching_behavior']).lower() != _snake_to_camel(self.query_string_caching_behavior).lower():
- self.log("query_string_caching_behavior Diff - Origin {0} / Update {1}".format(response['query_string_caching_behavior'],
- self.query_string_caching_behavior))
- return True
-
- return False
-
- def get_cdn_client(self):
- if not self.cdn_client:
- self.cdn_client = self.get_mgmt_svc_client(CdnManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2017-04-02')
- return self.cdn_client
-
-
-def main():
- """Main execution"""
- AzureRMCdnendpoint()
-
-
-if __name__ == '__main__':
- main()
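check_update() above compares query_string_caching_behavior case-insensitively after running both sides through _snake_to_camel, so the module's snake_case choices match the camel-case values reported by the CDN service. A small sketch with illustrative values:

from ansible.module_utils.common.dict_transformations import _snake_to_camel

requested = 'ignore_query_string'     # module argument value
current = 'IgnoreQueryString'         # value returned by the service
needs_update = _snake_to_camel(requested).lower() != _snake_to_camel(current).lower()
# needs_update is False, so this field alone does not trigger an endpoint update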
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint_info.py b/lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint_info.py
deleted file mode 100644
index bd1527c1ce..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint_info.py
+++ /dev/null
@@ -1,315 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Hai Cao, <t-haicao@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_cdnendpoint_info
-
-version_added: "2.9"
-
-short_description: Get Azure CDN endpoint facts
-
-description:
- - Get facts for a specific Azure CDN endpoint or all Azure CDN endpoints.
-
-options:
- resource_group:
- description:
- - Name of resource group where this CDN profile belongs to.
- required: true
- profile_name:
- description:
- - Name of CDN profile.
- required: true
- name:
- description:
- - Limit results to a specific Azure CDN endpoint.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Hai Cao (@caohai)
-    - Yunge Zhu (@yungezz)
-'''
-
-EXAMPLES = '''
- - name: Get facts for all endpoints in CDN profile
- azure_rm_cdnendpoint_info:
- resource_group: myResourceGroup
- profile_name: myCDNProfile
-
- - name: Get facts of specific CDN endpoint
- azure_rm_cdnendpoint_info:
- resource_group: myResourceGroup
- profile_name: myCDNProfile
- name: myEndpoint1
-'''
-
-RETURN = '''
-cdnendpoints:
- description: List of Azure CDN endpoints.
- returned: always
- type: complex
- contains:
- resource_group:
- description:
- - Name of a resource group where the Azure CDN endpoint exists.
- returned: always
- type: str
- sample: myResourceGroup
- name:
- description:
- - Name of the Azure CDN endpoint.
- returned: always
- type: str
- sample: myEndpoint
- profile_name:
- description:
- - Name of the Azure CDN profile that this endpoint is attached to.
- returned: always
- type: str
- sample: myProfile
- location:
- description:
- - Location of the Azure CDN endpoint.
- type: str
- sample: WestUS
- id:
- description:
- - ID of the Azure CDN endpoint.
- type: str
- sample:
- "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myCDN/providers/Microsoft.Cdn/profiles/myProfile/endpoints/myEndpoint1"
- provisioning_state:
- description:
- - Provisioning status of the Azure CDN endpoint.
- type: str
- sample: Succeeded
- resource_state:
- description:
- - Resource status of the profile.
- type: str
- sample: Running
- is_compression_enabled:
- description:
- - Indicates whether content compression is enabled on CDN.
- type: bool
- sample: true
- is_http_allowed:
- description:
- - Indicates whether HTTP traffic is allowed on the endpoint.
- type: bool
- sample: true
- is_https_allowed:
- description:
- - Indicates whether HTTPS traffic is allowed on the endpoint.
- type: bool
- sample: true
- query_string_caching_behavior:
- description:
- - Defines how CDN caches requests that include query strings.
- type: str
- sample: IgnoreQueryString
- content_types_to_compress:
- description:
- - List of content types on which compression applies.
- type: list
- sample: [
- "text/plain",
- "text/html",
- "text/css",
- "text/javascript",
- "application/x-javascript",
- "application/javascript",
- "application/json",
- "application/xml"
- ]
- origins:
- description:
- - The source of the content being delivered via CDN.
- sample: {
- "host_name": "xxxxxxxx.blob.core.windows.net",
- "http_port": null,
- "https_port": null,
- "name": "xxxxxxxx-blob-core-windows-net"
- }
- origin_host_header:
- description:
- - The host header value sent to the origin with each request.
- type: str
- sample: xxxxxxxx.blob.core.windows.net
- origin_path:
- description:
- - A directory path on the origin that CDN can use to retrieve content from.
- type: str
- sample: /pic/
- tags:
- description:
- - The tags of the Azure CDN endpoint.
- type: list
- sample: foo
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from azure.mgmt.cdn import CdnManagementClient
- from azure.mgmt.cdn.models import ErrorResponseException
- from azure.common import AzureHttpError
-except ImportError:
- # handled in azure_rm_common
- pass
-
-import re
-
-AZURE_OBJECT_CLASS = 'endpoints'
-
-
-class AzureRMCdnEndpointInfo(AzureRMModuleBase):
- """Utility class to get Azure Azure CDN endpoint facts"""
-
- def __init__(self):
-
- self.module_args = dict(
- name=dict(type='str'),
- resource_group=dict(
- type='str',
- required=True
- ),
- profile_name=dict(
- type='str',
- required=True
- ),
- tags=dict(type='list')
- )
-
- self.results = dict(
- changed=False,
- cdnendpoints=[]
- )
-
- self.name = None
- self.resource_group = None
- self.profile_name = None
- self.tags = None
-
- super(AzureRMCdnEndpointInfo, self).__init__(
- derived_arg_spec=self.module_args,
- supports_tags=False,
- facts_module=True
- )
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_cdnendpoint_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_cdnendpoint_facts' module has been renamed to 'azure_rm_cdnendpoint_info'", version='2.13')
-
- for key in self.module_args:
- setattr(self, key, kwargs[key])
-
- self.cdn_client = self.get_mgmt_svc_client(CdnManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2017-04-02')
-
- if self.name:
- self.results['cdnendpoints'] = self.get_item()
- else:
- self.results['cdnendpoints'] = self.list_by_profile()
-
- return self.results
-
- def get_item(self):
- """Get a single Azure Azure CDN endpoint"""
-
- self.log('Get properties for {0}'.format(self.name))
-
- item = None
- result = []
-
- try:
- item = self.cdn_client.endpoints.get(
- self.resource_group, self.profile_name, self.name)
- except ErrorResponseException:
- pass
-
- if item and self.has_tags(item.tags, self.tags):
- result = [self.serialize_cdnendpoint(item)]
-
- return result
-
- def list_by_profile(self):
- """Get all Azure Azure CDN endpoints within an Azure CDN profile"""
-
- self.log('List all Azure CDN endpoints within an Azure CDN profile')
-
- try:
- response = self.cdn_client.endpoints.list_by_profile(
- self.resource_group, self.profile_name)
- except ErrorResponseException as exc:
- self.fail('Failed to list all items - {0}'.format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.serialize_cdnendpoint(item))
-
- return results
-
- def serialize_cdnendpoint(self, cdnendpoint):
- '''
-        Convert an Azure CDN endpoint object to a dict.
-        :param cdnendpoint: Azure CDN endpoint object
- :return: dict
- '''
- result = self.serialize_obj(cdnendpoint, AZURE_OBJECT_CLASS)
-
- new_result = {}
- new_result['id'] = cdnendpoint.id
- new_result['resource_group'] = re.sub('\\/.*', '', re.sub('.*resourcegroups\\/', '', result['id']))
- new_result['profile_name'] = re.sub('\\/.*', '', re.sub('.*profiles\\/', '', result['id']))
- new_result['name'] = cdnendpoint.name
- new_result['type'] = cdnendpoint.type
- new_result['location'] = cdnendpoint.location
- new_result['resource_state'] = cdnendpoint.resource_state
- new_result['provisioning_state'] = cdnendpoint.provisioning_state
- new_result['query_string_caching_behavior'] = cdnendpoint.query_string_caching_behavior
- new_result['is_compression_enabled'] = cdnendpoint.is_compression_enabled
- new_result['is_http_allowed'] = cdnendpoint.is_http_allowed
- new_result['is_https_allowed'] = cdnendpoint.is_https_allowed
- new_result['content_types_to_compress'] = cdnendpoint.content_types_to_compress
- new_result['origin_host_header'] = cdnendpoint.origin_host_header
- new_result['origin_path'] = cdnendpoint.origin_path
- new_result['origin'] = dict(
- name=cdnendpoint.origins[0].name,
- host_name=cdnendpoint.origins[0].host_name,
- http_port=cdnendpoint.origins[0].http_port,
- https_port=cdnendpoint.origins[0].https_port
- )
- new_result['tags'] = cdnendpoint.tags
- return new_result
-
-
-def main():
- """Main module execution code path"""
-
- AzureRMCdnEndpointInfo()
-
-
-if __name__ == '__main__':
- main()
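serialize_cdnendpoint() above recovers resource_group and profile_name from the endpoint ID with two regular-expression substitutions. A quick illustration on a sample ID (values are illustrative):

import re

endpoint_id = ('/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myCDN'
               '/providers/Microsoft.Cdn/profiles/myProfile/endpoints/myEndpoint1')
resource_group = re.sub('\\/.*', '', re.sub('.*resourcegroups\\/', '', endpoint_id))
profile_name = re.sub('\\/.*', '', re.sub('.*profiles\\/', '', endpoint_id))
# resource_group == 'myCDN', profile_name == 'myProfile'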
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_cdnprofile.py b/lib/ansible/modules/cloud/azure/azure_rm_cdnprofile.py
deleted file mode 100644
index a92adf2c68..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_cdnprofile.py
+++ /dev/null
@@ -1,304 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Hai Cao, <t-haicao@microsoft.com>, Yunge Zhu <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_cdnprofile
-version_added: "2.8"
-short_description: Manage an Azure CDN profile
-description:
-    - Create, update and delete an Azure CDN profile.
-
-options:
- resource_group:
- description:
- - Name of a resource group where the CDN profile exists or will be created.
- required: true
- name:
- description:
- - Name of the CDN profile.
- required: true
- location:
- description:
- - Valid Azure location. Defaults to location of the resource group.
- sku:
- description:
- - The pricing tier, defines a CDN provider, feature list and rate of the CDN profile.
-            - Detailed pricing can be found at U(https://azure.microsoft.com/en-us/pricing/details/cdn/).
- choices:
- - standard_verizon
- - premium_verizon
- - custom_verizon
- - standard_akamai
- - standard_chinacdn
- - standard_microsoft
- state:
- description:
- - Assert the state of the CDN profile. Use C(present) to create or update a CDN profile and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Hai Cao (@caohai)
- - Yunge Zhu (@yungezz)
-'''
-
-EXAMPLES = '''
- - name: Create a CDN profile
- azure_rm_cdnprofile:
- resource_group: myResourceGroup
- name: myCDN
- sku: standard_akamai
- tags:
- testing: testing
-
- - name: Delete the CDN profile
- azure_rm_cdnprofile:
- resource_group: myResourceGroup
- name: myCDN
- state: absent
-'''
-RETURN = '''
-id:
- description: Current state of the CDN profile.
- returned: always
- type: dict
- example:
- id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Cdn/profiles/myCDN
-'''
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from azure.mgmt.cdn.models import Profile, Sku, ErrorResponseException
- from azure.mgmt.cdn import CdnManagementClient
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def cdnprofile_to_dict(cdnprofile):
- return dict(
- id=cdnprofile.id,
- name=cdnprofile.name,
- type=cdnprofile.type,
- location=cdnprofile.location,
- sku=cdnprofile.sku.name,
- resource_state=cdnprofile.resource_state,
- provisioning_state=cdnprofile.provisioning_state,
- tags=cdnprofile.tags
- )
-
-
-class AzureRMCdnprofile(AzureRMModuleBase):
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- ),
- sku=dict(
- type='str',
- choices=['standard_verizon', 'premium_verizon', 'custom_verizon', 'standard_akamai', 'standard_chinacdn', 'standard_microsoft']
- )
- )
-
- self.resource_group = None
- self.name = None
- self.location = None
- self.state = None
- self.tags = None
- self.sku = None
-
- self.cdn_client = None
-
- required_if = [
- ('state', 'present', ['sku'])
- ]
-
- self.results = dict(changed=False)
-
- super(AzureRMCdnprofile, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True,
- required_if=required_if)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- self.cdn_client = self.get_cdn_client()
-
- to_be_updated = False
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- response = self.get_cdnprofile()
-
- if self.state == 'present':
-
- if not response:
- self.log("Need to create the CDN profile")
-
- if not self.check_mode:
- new_response = self.create_cdnprofile()
- self.results['id'] = new_response['id']
-
- self.results['changed'] = True
-
- else:
- self.log('Results : {0}'.format(response))
- update_tags, response['tags'] = self.update_tags(response['tags'])
-
- if response['provisioning_state'] == "Succeeded":
- if update_tags:
- to_be_updated = True
-
- if to_be_updated:
- self.log("Need to update the CDN profile")
-
- if not self.check_mode:
- new_response = self.update_cdnprofile()
- self.results['id'] = new_response['id']
-
- self.results['changed'] = True
-
- elif self.state == 'absent':
- if not response:
- self.fail("CDN profile {0} not exists.".format(self.name))
- else:
- self.log("Need to delete the CDN profile")
- self.results['changed'] = True
-
- if not self.check_mode:
- self.delete_cdnprofile()
- self.results['id'] = response['id']
-
- return self.results
-
- def create_cdnprofile(self):
- '''
-        Creates an Azure CDN profile.
-
- :return: deserialized Azure CDN profile instance state dictionary
- '''
- self.log("Creating the Azure CDN profile instance {0}".format(self.name))
-
- parameters = Profile(
- location=self.location,
- sku=Sku(name=self.sku),
- tags=self.tags
- )
-
- import uuid
- xid = str(uuid.uuid1())
-
- try:
- poller = self.cdn_client.profiles.create(self.resource_group,
- self.name,
- parameters,
- custom_headers={'x-ms-client-request-id': xid}
- )
- response = self.get_poller_result(poller)
- return cdnprofile_to_dict(response)
- except ErrorResponseException as exc:
- self.log('Error attempting to create Azure CDN profile instance.')
- self.fail("Error creating Azure CDN profile instance: {0}.\n Request id: {1}".format(exc.message, xid))
-
- def update_cdnprofile(self):
- '''
-        Updates an Azure CDN profile.
-
- :return: deserialized Azure CDN profile instance state dictionary
- '''
- self.log("Updating the Azure CDN profile instance {0}".format(self.name))
-
- try:
- poller = self.cdn_client.profiles.update(self.resource_group, self.name, self.tags)
- response = self.get_poller_result(poller)
- return cdnprofile_to_dict(response)
- except ErrorResponseException as exc:
- self.log('Error attempting to update Azure CDN profile instance.')
- self.fail("Error updating Azure CDN profile instance: {0}".format(exc.message))
-
- def delete_cdnprofile(self):
- '''
- Deletes the specified Azure CDN profile in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the CDN profile {0}".format(self.name))
- try:
- poller = self.cdn_client.profiles.delete(
- self.resource_group, self.name)
- self.get_poller_result(poller)
- return True
- except ErrorResponseException as e:
- self.log('Error attempting to delete the CDN profile.')
- self.fail("Error deleting the CDN profile: {0}".format(e.message))
- return False
-
- def get_cdnprofile(self):
- '''
- Gets the properties of the specified CDN profile.
-
- :return: deserialized CDN profile state dictionary
- '''
- self.log(
- "Checking if the CDN profile {0} is present".format(self.name))
- try:
- response = self.cdn_client.profiles.get(self.resource_group, self.name)
- self.log("Response : {0}".format(response))
- self.log("CDN profile : {0} found".format(response.name))
- return cdnprofile_to_dict(response)
- except ErrorResponseException:
- self.log('Did not find the CDN profile.')
- return False
-
- def get_cdn_client(self):
- if not self.cdn_client:
- self.cdn_client = self.get_mgmt_svc_client(CdnManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2017-04-02')
- return self.cdn_client
-
-
-def main():
- """Main execution"""
- AzureRMCdnprofile()
-
-
-if __name__ == '__main__':
- main()
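For an existing profile, the only in-place change exec_module() above makes is a tag update, and only once provisioning_state is Succeeded. A simplified sketch of that decision; the real update_tags() helper from the base class also honors append_tags and merging rules, so treat this as an approximation with illustrative values:

current_tags = {'testing': 'testing'}                     # from the GET response
requested_tags = {'testing': 'testing', 'owner': 'web'}   # module 'tags' argument
tags_changed = any(current_tags.get(k) != v for k, v in requested_tags.items())
provisioning_state = 'Succeeded'
to_be_updated = tags_changed and provisioning_state == 'Succeeded'
# True here, so the module would call self.cdn_client.profiles.update() with the merged tags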
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_cdnprofile_info.py b/lib/ansible/modules/cloud/azure/azure_rm_cdnprofile_info.py
deleted file mode 100644
index 3de696727b..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_cdnprofile_info.py
+++ /dev/null
@@ -1,268 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Hai Cao, <t-haicao@microsoft.com>, Yunge Zhu <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_cdnprofile_info
-
-version_added: "2.9"
-
-short_description: Get Azure CDN profile facts
-
-description:
- - Get facts for a specific Azure CDN profile or all CDN profiles.
-
-options:
- name:
- description:
- - Limit results to a specific CDN profile.
- resource_group:
- description:
- - The resource group to search for the desired CDN profile.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Hai Cao (@caohai)
- - Yunge Zhu (@yungezz)
-'''
-
-EXAMPLES = '''
- - name: Get facts for one CDN profile
- azure_rm_cdnprofile_info:
- name: Testing
- resource_group: myResourceGroup
-
- - name: Get facts for all CDN profiles
- azure_rm_cdnprofile_info:
-
- - name: Get facts by tags
- azure_rm_cdnprofile_info:
- tags:
- - Environment:Test
-'''
-
-RETURN = '''
-cdnprofiles:
- description: List of CDN profiles.
- returned: always
- type: complex
- contains:
- resource_group:
- description:
- - Name of a resource group where the CDN profile exists.
- returned: always
- type: str
- sample: myResourceGroup
- name:
- description:
- - Name of the CDN profile.
- returned: always
- type: str
- sample: Testing
- location:
- description:
- - Location of the CDN profile.
- type: str
- sample: WestUS
- id:
- description:
- - ID of the CDN profile.
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Cdn/profiles/myCDN
- provisioning_state:
- description:
- - Provisioning status of the profile.
- type: str
- sample: Succeeded
- resource_state:
- description:
- - Resource status of the profile.
- type: str
- sample: Active
- sku:
- description:
- - The pricing tier; defines a CDN provider, feature list, and rate of the CDN profile.
- type: str
- sample: standard_verizon
- type:
- description:
- - The type of the CDN profile.
- type: str
- sample: Microsoft.Cdn/profiles
- tags:
- description:
- - The tags of the CDN profile.
- type: list
- sample: [
- {"foo": "bar"}
- ]
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from azure.mgmt.cdn.models import ErrorResponseException
- from azure.common import AzureHttpError
- from azure.mgmt.cdn import CdnManagementClient
-except Exception:
- # handled in azure_rm_common
- pass
-
-import re
-
-AZURE_OBJECT_CLASS = 'profiles'
-
-
-class AzureRMCdnprofileInfo(AzureRMModuleBase):
- """Utility class to get Azure CDN profile facts"""
-
- def __init__(self):
-
- self.module_args = dict(
- name=dict(type='str'),
- resource_group=dict(type='str'),
- tags=dict(type='list')
- )
-
- self.results = dict(
- changed=False,
- cdnprofiles=[]
- )
-
- self.name = None
- self.resource_group = None
- self.tags = None
- self.cdn_client = None
-
- super(AzureRMCdnprofileInfo, self).__init__(
- derived_arg_spec=self.module_args,
- supports_tags=False,
- facts_module=True
- )
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_cdnprofile_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_cdnprofile_facts' module has been renamed to 'azure_rm_cdnprofile_info'", version='2.13')
-
- for key in self.module_args:
- setattr(self, key, kwargs[key])
-
- self.cdn_client = self.get_cdn_client()
-
- if self.name and not self.resource_group:
- self.fail("Parameter error: resource group required when filtering by name.")
-
- if self.name:
- self.results['cdnprofiles'] = self.get_item()
- elif self.resource_group:
- self.results['cdnprofiles'] = self.list_resource_group()
- else:
- self.results['cdnprofiles'] = self.list_all()
-
- return self.results
-
- def get_item(self):
- """Get a single Azure CDN profile"""
-
- self.log('Get properties for {0}'.format(self.name))
-
- item = None
- result = []
-
- try:
- item = self.cdn_client.profiles.get(
- self.resource_group, self.name)
- except ErrorResponseException:
- pass
-
- if item and self.has_tags(item.tags, self.tags):
- result = [self.serialize_cdnprofile(item)]
-
- return result
-
- def list_resource_group(self):
- """Get all Azure CDN profiles within a resource group"""
-
- self.log('List all Azure CDNs within a resource group')
-
- try:
- response = self.cdn_client.profiles.list_by_resource_group(
- self.resource_group)
- except AzureHttpError as exc:
- self.fail('Failed to list all items - {0}'.format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.serialize_cdnprofile(item))
-
- return results
-
- def list_all(self):
- """Get all Azure CDN profiles within a subscription"""
- self.log('List all CDN profiles within a subscription')
- try:
- response = self.cdn_client.profiles.list()
- except Exception as exc:
- self.fail("Error listing all items - {0}".format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.serialize_cdnprofile(item))
- return results
-
- def serialize_cdnprofile(self, cdnprofile):
- '''
- Convert a CDN profile object to dict.
- :param cdn: CDN profile object
- :return: dict
- '''
- result = self.serialize_obj(cdnprofile, AZURE_OBJECT_CLASS)
-
- new_result = {}
- new_result['id'] = cdnprofile.id
- new_result['resource_group'] = re.sub('\\/.*', '', re.sub('.*resourcegroups\\/', '', result['id']))
- new_result['name'] = cdnprofile.name
- new_result['type'] = cdnprofile.type
- new_result['location'] = cdnprofile.location
- new_result['resource_state'] = cdnprofile.resource_state
- new_result['sku'] = cdnprofile.sku.name
- new_result['provisioning_state'] = cdnprofile.provisioning_state
- new_result['tags'] = cdnprofile.tags
- return new_result
-
- def get_cdn_client(self):
- if not self.cdn_client:
- self.cdn_client = self.get_mgmt_svc_client(CdnManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2017-04-02')
- return self.cdn_client
-
-
-def main():
- """Main module execution code path"""
-
- AzureRMCdnprofileInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_containerinstance.py b/lib/ansible/modules/cloud/azure/azure_rm_containerinstance.py
deleted file mode 100644
index 5b2d04fa7d..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_containerinstance.py
+++ /dev/null
@@ -1,529 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_containerinstance
-version_added: "2.5"
-short_description: Manage an Azure Container Instance
-description:
- - Create, update and delete an Azure Container Instance.
-
-options:
- resource_group:
- description:
- - Name of resource group.
- required: true
- name:
- description:
- - The name of the container group.
- required: true
- os_type:
- description:
- - The OS type of containers.
- choices:
- - linux
- - windows
- default: linux
- state:
- description:
- - Assert the state of the container instance. Use C(present) to create or update a container instance and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
- ip_address:
- description:
- - The IP address type of the container group.
- - Default is C(none), which creates an instance without a public IP.
- choices:
- - public
- - none
- default: 'none'
- dns_name_label:
- description:
- - The DNS name label for the IP.
- type: str
- version_added: "2.8"
- ports:
- description:
- - List of ports exposed within the container group.
- - This option is deprecated; use I(ports) under I(containers) instead.
- type: list
- location:
- description:
- - Valid Azure location. Defaults to the location of the resource group.
- registry_login_server:
- description:
- - The container image registry login server.
- registry_username:
- description:
- - The username used to log in to the container image registry server.
- registry_password:
- description:
- - The password used to log in to the container image registry server.
- containers:
- description:
- - List of containers.
- - Required when I(state=present).
- suboptions:
- name:
- description:
- - The name of the container instance.
- required: true
- image:
- description:
- - The container image name.
- required: true
- memory:
- description:
- - The required memory of the containers in GB.
- type: float
- default: 1.5
- cpu:
- description:
- - The required number of CPU cores of the containers.
- type: float
- default: 1
- ports:
- description:
- - List of ports exposed within the container group.
- type: list
- environment_variables:
- description:
- - List of container environment variables.
- - When updating an existing container, all existing variables will be replaced by the new ones.
- type: list
- suboptions:
- name:
- description:
- - Environment variable name.
- type: str
- value:
- description:
- - Environment variable value.
- type: str
- is_secure:
- description:
- - Is variable secure.
- type: bool
- version_added: "2.8"
- commands:
- description:
- - List of commands to execute within the container instance in exec form.
- - When updating an existing container, all existing commands will be replaced by the new ones.
- type: list
- version_added: "2.8"
- restart_policy:
- description:
- - Restart policy for all containers within the container group.
- type: str
- choices:
- - always
- - on_failure
- - never
- version_added: "2.8"
- force_update:
- description:
- - Force update of existing container instance. Any update will result in deletion and recreation of existing containers.
- type: bool
- default: 'no'
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Create sample container group
- azure_rm_containerinstance:
- resource_group: myResourceGroup
- name: myContainerInstanceGroup
- os_type: linux
- ip_address: public
- containers:
- - name: myContainer1
- image: httpd
- memory: 1.5
- ports:
- - 80
- - 81
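-
- # Hypothetical sketch (not part of the original examples): the options below are
- # documented above but not exercised in the sample task; all values are illustrative.
- - name: Create container group from a private registry with environment variables
-   azure_rm_containerinstance:
-     resource_group: myResourceGroup
-     name: myContainerInstanceGroup
-     ip_address: public
-     dns_name_label: myaciapp
-     restart_policy: on_failure
-     registry_login_server: myregistry.azurecr.io
-     registry_username: myregistry
-     registry_password: "{{ acr_password }}"
-     containers:
-       - name: myContainer1
-         image: myregistry.azurecr.io/myimage:latest
-         memory: 1.5
-         cpu: 1
-         ports:
-           - 80
-         environment_variables:
-           - name: MY_SECRET
-             value: "{{ my_secret_value }}"
-             is_secure: yes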
-'''
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerInstance/containerGroups/aci1b6dd89
-provisioning_state:
- description:
- - Provisioning state of the container.
- returned: always
- type: str
- sample: Creating
-ip_address:
- description:
- - Public IP Address of created container group.
- returned: if address is public
- type: str
- sample: 175.12.233.11
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _snake_to_camel
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from azure.mgmt.containerinstance import ContainerInstanceManagementClient
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def create_container_dict_from_obj(container):
- '''
- Create a dict from an instance of a Container.
-
- :param container: Container
- :return: dict
- '''
- results = dict(
- name=container.name,
- image=container.image,
- memory=container.resources.requests.memory_in_gb,
- cpu=container.resources.requests.cpu
- # command (list of str)
- # ports (list of ContainerPort)
- # environment_variables (list of EnvironmentVariable)
- # resources (ResourceRequirements)
- # volume mounts (list of VolumeMount)
- )
-
- if container.instance_view is not None:
- # instance_view (ContainerPropertiesInstanceView)
- results["instance_restart_count"] = container.instance_view.restart_count
- if container.instance_view.current_state:
- results["instance_current_state"] = container.instance_view.current_state.state
- results["instance_current_start_time"] = container.instance_view.current_state.start_time
- results["instance_current_exit_code"] = container.instance_view.current_state.exit_code
- results["instance_current_finish_time"] = container.instance_view.current_state.finish_time
- results["instance_current_detail_status"] = container.instance_view.current_state.detail_status
- if container.instance_view.previous_state:
- results["instance_previous_state"] = container.instance_view.previous_state.state
- results["instance_previous_start_time"] = container.instance_view.previous_state.start_time
- results["instance_previous_exit_code"] = container.instance_view.previous_state.exit_code
- results["instance_previous_finish_time"] = container.instance_view.previous_state.finish_time
- results["instance_previous_detail_status"] = container.instance_view.previous_state.detail_status
- # events (list of ContainerEvent)
- return results
-
-
-env_var_spec = dict(
- name=dict(type='str', required=True),
- value=dict(type='str', required=True),
- is_secure=dict(type='bool')
-)
-
-
-container_spec = dict(
- name=dict(type='str', required=True),
- image=dict(type='str', required=True),
- memory=dict(type='float', default=1.5),
- cpu=dict(type='float', default=1),
- ports=dict(type='list', elements='int'),
- commands=dict(type='list', elements='str'),
- environment_variables=dict(type='list', elements='dict', options=env_var_spec)
-)
-
-
-class AzureRMContainerInstance(AzureRMModuleBase):
- """Configuration class for an Azure RM container instance resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- os_type=dict(
- type='str',
- default='linux',
- choices=['linux', 'windows']
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- ),
- location=dict(
- type='str',
- ),
- ip_address=dict(
- type='str',
- default='none',
- choices=['public', 'none']
- ),
- dns_name_label=dict(
- type='str',
- ),
- ports=dict(
- type='list',
- default=[]
- ),
- registry_login_server=dict(
- type='str',
- default=None
- ),
- registry_username=dict(
- type='str',
- default=None
- ),
- registry_password=dict(
- type='str',
- default=None,
- no_log=True
- ),
- containers=dict(
- type='list',
- elements='dict',
- options=container_spec
- ),
- restart_policy=dict(
- type='str',
- choices=['always', 'on_failure', 'never']
- ),
- force_update=dict(
- type='bool',
- default=False
- ),
- )
-
- self.resource_group = None
- self.name = None
- self.location = None
- self.state = None
- self.ip_address = None
- self.dns_name_label = None
- self.containers = None
- self.restart_policy = None
-
- self.tags = None
-
- self.results = dict(changed=False, state=dict())
- self.cgmodels = None
-
- required_if = [
- ('state', 'present', ['containers'])
- ]
-
- super(AzureRMContainerInstance, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True,
- required_if=required_if)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- resource_group = None
- response = None
- results = dict()
-
- # since this client hasn't been upgraded to expose models directly off the OperationClass, fish them out
- self.cgmodels = self.containerinstance_client.container_groups.models
-
- resource_group = self.get_resource_group(self.resource_group)
-
- if not self.location:
- self.location = resource_group.location
-
- response = self.get_containerinstance()
-
- if not response:
- self.log("Container Group doesn't exist")
-
- if self.state == 'absent':
- self.log("Nothing to delete")
- else:
- self.force_update = True
- else:
- self.log("Container instance already exists")
-
- if self.state == 'absent':
- if not self.check_mode:
- self.delete_containerinstance()
- self.results['changed'] = True
- self.log("Container instance deleted")
- elif self.state == 'present':
- self.log("Need to check if container group has to be deleted or may be updated")
- update_tags, newtags = self.update_tags(response.get('tags', dict()))
- if update_tags:
- self.tags = newtags
-
- if self.force_update:
- self.log('Deleting container instance before update')
- if not self.check_mode:
- self.delete_containerinstance()
-
- if self.state == 'present':
-
- self.log("Need to Create / Update the container instance")
-
- if self.force_update:
- self.results['changed'] = True
- if self.check_mode:
- return self.results
- response = self.create_update_containerinstance()
-
- self.results['id'] = response['id']
- self.results['provisioning_state'] = response['provisioning_state']
- self.results['ip_address'] = response['ip_address']['ip'] if 'ip_address' in response else ''
-
- self.log("Creation / Update done")
-
- return self.results
-
- def create_update_containerinstance(self):
- '''
- Creates or updates a container group with the specified configuration of containers.
-
- :return: deserialized container instance state dictionary
- '''
- self.log("Creating / Updating the container instance {0}".format(self.name))
-
- registry_credentials = None
-
- if self.registry_login_server is not None:
- registry_credentials = [self.cgmodels.ImageRegistryCredential(server=self.registry_login_server,
- username=self.registry_username,
- password=self.registry_password)]
-
- ip_address = None
-
- containers = []
- all_ports = set([])
- for container_def in self.containers:
- name = container_def.get("name")
- image = container_def.get("image")
- memory = container_def.get("memory")
- cpu = container_def.get("cpu")
- commands = container_def.get("commands")
- ports = []
- variables = []
-
- port_list = container_def.get("ports")
- if port_list:
- for port in port_list:
- all_ports.add(port)
- ports.append(self.cgmodels.ContainerPort(port=port))
-
- variable_list = container_def.get("environment_variables")
- if variable_list:
- for variable in variable_list:
- variables.append(self.cgmodels.EnvironmentVariable(name=variable.get('name'),
- value=variable.get('value') if not variable.get('is_secure') else None,
- secure_value=variable.get('value') if variable.get('is_secure') else None))
-
- containers.append(self.cgmodels.Container(name=name,
- image=image,
- resources=self.cgmodels.ResourceRequirements(
- requests=self.cgmodels.ResourceRequests(memory_in_gb=memory, cpu=cpu)
- ),
- ports=ports,
- command=commands,
- environment_variables=variables))
-
- if self.ip_address == 'public':
- # get list of ports
- if len(all_ports) > 0:
- ports = []
- for port in all_ports:
- ports.append(self.cgmodels.Port(port=port, protocol="TCP"))
- ip_address = self.cgmodels.IpAddress(ports=ports, dns_name_label=self.dns_name_label, type='public')
-
- parameters = self.cgmodels.ContainerGroup(location=self.location,
- containers=containers,
- image_registry_credentials=registry_credentials,
- restart_policy=_snake_to_camel(self.restart_policy, True) if self.restart_policy else None,
- ip_address=ip_address,
- os_type=self.os_type,
- volumes=None,
- tags=self.tags)
-
- try:
- response = self.containerinstance_client.container_groups.create_or_update(resource_group_name=self.resource_group,
- container_group_name=self.name,
- container_group=parameters)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
- except CloudError as exc:
- self.fail("Error when creating ACI {0}: {1}".format(self.name, exc.message or str(exc)))
-
- return response.as_dict()
-
- def delete_containerinstance(self):
- '''
- Deletes the specified container group instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the container instance {0}".format(self.name))
- try:
- response = self.containerinstance_client.container_groups.delete(resource_group_name=self.resource_group, container_group_name=self.name)
- return True
- except CloudError as exc:
- self.fail('Error when deleting ACI {0}: {1}'.format(self.name, exc.message or str(exc)))
- return False
-
- def get_containerinstance(self):
- '''
- Gets the properties of the specified container group.
-
- :return: deserialized container instance state dictionary
- '''
- self.log("Checking if the container instance {0} is present".format(self.name))
- found = False
- try:
- response = self.containerinstance_client.container_groups.get(resource_group_name=self.resource_group, container_group_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("Container instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the container instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMContainerInstance()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_containerinstance_info.py b/lib/ansible/modules/cloud/azure/azure_rm_containerinstance_info.py
deleted file mode 100644
index 8968c33539..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_containerinstance_info.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_containerinstance_info
-version_added: "2.9"
-short_description: Get Azure Container Instance facts
-description:
- - Get facts of Container Instance.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- name:
- description:
- - The name of the container instance.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get specific Container Instance facts
- azure_rm_containerinstance_info:
- resource_group: myResourceGroup
- name: myContainer
-
- - name: List Container Instances in a specified resource group name
- azure_rm_containerinstance_info:
- resource_group: myResourceGroup
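-
- # Hypothetical sketch (not part of the original examples): register the result and
- # read fields documented under RETURN; variable names are illustrative.
- - name: Get facts and show the public IP of a container group
-   azure_rm_containerinstance_info:
-     resource_group: myResourceGroup
-     name: myContainer
-   register: aci_output
-
- - name: Show the IP address
-   debug:
-     msg: "{{ aci_output.containerinstances[0].ip_address }}"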
-'''
-
-RETURN = '''
-containerinstances:
- description: A list of Container Instance dictionaries.
- returned: always
- type: complex
- contains:
- id:
- description:
- - The resource id.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerInstance/contain
- erGroups/myContainer"
- resource_group:
- description:
- - Resource group where the container exists.
- returned: always
- type: str
- sample: testrg
- name:
- description:
- - The resource name.
- returned: always
- type: str
- sample: mycontainers
- location:
- description:
- - The resource location.
- returned: always
- type: str
- sample: westus
- os_type:
- description:
- - The OS type of containers.
- returned: always
- type: str
- sample: linux
- ip_address:
- description:
- - IP address of the container instance.
- returned: always
- type: str
- sample: 173.15.18.1
- dns_name_label:
- description:
- - The DNS name label for the IP.
- returned: always
- type: str
- sample: mydomain
- ports:
- description:
- - List of ports exposed by the container instance.
- returned: always
- type: list
- sample: [ 80, 81 ]
- containers:
- description:
- - The containers within the container group.
- returned: always
- type: complex
- sample: containers
- contains:
- name:
- description:
- - The name of the container instance.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerInstance
- /containerGroups/myContainer"
- image:
- description:
- - The container image name.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerInstance
- /containerGroups/myContainer"
- memory:
- description:
- - The required memory of the containers in GB.
- returned: always
- type: float
- sample: 1.5
- cpu:
- description:
- - The required number of CPU cores of the containers.
- returned: always
- type: int
- sample: 1
- ports:
- description:
- - List of ports exposed within the container group.
- returned: always
- type: list
- sample: [ 80, 81 ]
- commands:
- description:
- - List of commands to execute within the container instance in exec form.
- returned: always
- type: list
- sample: [ "pip install abc" ]
- environment_variables:
- description:
- - List of container environment variables.
- type: complex
- contains:
- name:
- description:
- - Environment variable name.
- type: str
- value:
- description:
- - Environment variable value.
- type: str
- tags:
- description: Tags assigned to the resource. Dictionary of string:string pairs.
- type: dict
- sample: { "tag1": "abc" }
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _camel_to_snake
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.azure_operation import AzureOperationPoller
- from azure.mgmt.containerinstance import ContainerInstanceManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMContainerInstanceInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False,
- )
- self.resource_group = None
- self.name = None
-
- super(AzureRMContainerInstanceInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_containerinstance_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_containerinstance_facts' module has been renamed to 'azure_rm_containerinstance_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if (self.name is not None):
- self.results['containerinstances'] = self.get()
- elif (self.resource_group is not None):
- self.results['containerinstances'] = self.list_by_resource_group()
- else:
- self.results['containerinstances'] = self.list_all()
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.containerinstance_client.container_groups.get(resource_group_name=self.resource_group,
- container_group_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Container Instances.')
-
- if response is not None and self.has_tags(response.tags, self.tags):
- results.append(self.format_item(response))
-
- return results
-
- def list_by_resource_group(self):
- response = None
- results = []
- try:
- response = self.containerinstance_client.container_groups.list_by_resource_group(resource_group_name=self.resource_group)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail('Could not list facts for Container Instances.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_item(item))
-
- return results
-
- def list_all(self):
- response = None
- results = []
- try:
- response = self.containerinstance_client.container_groups.list()
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail('Could not list facts for Container Instances.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_item(item))
-
- return results
-
- def format_item(self, item):
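- # Flatten the SDK response: ports are reduced to plain integers and each container
- # entry is reduced to the fields documented under RETURN.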
- d = item.as_dict()
- containers = d['containers']
- ports = d['ip_address']['ports'] if 'ip_address' in d else []
- resource_group = d['id'].split('resourceGroups/')[1].split('/')[0]
-
- for port_index in range(len(ports)):
- ports[port_index] = ports[port_index]['port']
-
- for container_index in range(len(containers)):
- old_container = containers[container_index]
- new_container = {
- 'name': old_container['name'],
- 'image': old_container['image'],
- 'memory': old_container['resources']['requests']['memory_in_gb'],
- 'cpu': old_container['resources']['requests']['cpu'],
- 'ports': [],
- 'commands': old_container.get('command'),
- 'environment_variables': old_container.get('environment_variables')
- }
- for port_index in range(len(old_container['ports'])):
- new_container['ports'].append(old_container['ports'][port_index]['port'])
- containers[container_index] = new_container
-
- d = {
- 'id': d['id'],
- 'resource_group': resource_group,
- 'name': d['name'],
- 'os_type': d['os_type'],
- 'dns_name_label': d['ip_address'].get('dns_name_label') if 'ip_address' in d else None,
- 'ip_address': d['ip_address']['ip'] if 'ip_address' in d else '',
- 'ports': ports,
- 'location': d['location'],
- 'containers': containers,
- 'restart_policy': _camel_to_snake(d.get('restart_policy')) if d.get('restart_policy') else None,
- 'tags': d.get('tags', None)
- }
- return d
-
-
-def main():
- AzureRMContainerInstanceInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_containerregistry.py b/lib/ansible/modules/cloud/azure/azure_rm_containerregistry.py
deleted file mode 100644
index 8ae6f8be93..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_containerregistry.py
+++ /dev/null
@@ -1,411 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Yawei Wang, <yaweiw@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_containerregistry
-version_added: "2.5"
-short_description: Manage an Azure Container Registry
-description:
- - Create, update and delete an Azure Container Registry.
-
-options:
- resource_group:
- description:
- - Name of a resource group where the Container Registry exists or will be created.
- required: true
- name:
- description:
- - Name of the Container Registry.
- required: true
- state:
- description:
- - Assert the state of the container registry. Use C(present) to create or update a container registry and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
- location:
- description:
- - Valid Azure location. Defaults to the location of the resource group.
- admin_user_enabled:
- description:
- - If enabled, you can use the registry name as the username and the admin user access key as the password to C(docker login) to your container registry.
- type: bool
- default: no
- sku:
- description:
- - Specifies the SKU to use. Currently can be either C(Basic), C(Standard) or C(Premium).
- default: Standard
- choices:
- - Basic
- - Standard
- - Premium
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yawei Wang (@yaweiw)
-
-'''
-
-EXAMPLES = '''
- - name: Create an azure container registry
- azure_rm_containerregistry:
- name: myRegistry
- location: eastus
- resource_group: myResourceGroup
- admin_user_enabled: true
- sku: Premium
- tags:
- Release: beta1
- Environment: Production
-
- - name: Remove an azure container registry
- azure_rm_containerregistry:
- name: myRegistry
- resource_group: myResourceGroup
- state: absent
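-
- # Hypothetical sketch (not part of the original examples): register the module output
- # to consume the values documented under RETURN; names are illustrative.
- - name: Create a registry and capture its login server
-   azure_rm_containerregistry:
-     name: myRegistry
-     resource_group: myResourceGroup
-     admin_user_enabled: true
-     sku: Standard
-   register: acr_output
-
- - name: Show where to docker login
-   debug:
-     msg: "{{ acr_output.login_server }}"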
-'''
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerRegistry/registries/myRegistry
-name:
- description:
- - Registry name.
- returned: always
- type: str
- sample: myregistry
-location:
- description:
- - Resource location.
- returned: always
- type: str
- sample: westus
-admin_user_enabled:
- description:
- - Is admin user enabled.
- returned: always
- type: bool
- sample: true
-sku:
- description:
- - The SKU name of the container registry.
- returned: always
- type: str
- sample: Standard
-provisioning_state:
- description:
- - Provisioning state.
- returned: always
- type: str
- sample: Succeeded
-login_server:
- description:
- - Registry login server.
- returned: always
- type: str
- sample: myregistry.azurecr.io
-credentials:
- description:
- - Passwords defined for the registry.
- returned: always
- type: complex
- contains:
- password:
- description:
- - password value.
- returned: when registry exists and C(admin_user_enabled) is set
- type: str
- sample: pass1value
- password2:
- description:
- - password2 value.
- returned: when registry exists and C(admin_user_enabled) is set
- type: str
- sample: pass2value
-tags:
- description:
- - Tags assigned to the resource. Dictionary of string:string pairs.
- returned: always
- type: dict
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.containerregistry.models import (
- Registry,
- RegistryUpdateParameters,
- StorageAccountProperties,
- Sku,
- SkuName,
- SkuTier,
- ProvisioningState,
- PasswordName,
- WebhookCreateParameters,
- WebhookUpdateParameters,
- WebhookAction,
- WebhookStatus
- )
- from azure.mgmt.containerregistry import ContainerRegistryManagementClient
-except ImportError as exc:
- # This is handled in azure_rm_common
- pass
-
-
-def create_containerregistry_dict(registry, credentials):
- '''
- Helper method to deserialize a ContainerRegistry to a dict
- :param registry: container registry object returned by the Azure REST API call
- :param credentials: credentials object returned by the Azure REST API call
- :return: dict of the returned container registry and its credentials
- '''
- results = dict(
- id=registry.id if registry is not None else "",
- name=registry.name if registry is not None else "",
- location=registry.location if registry is not None else "",
- admin_user_enabled=registry.admin_user_enabled if registry is not None else "",
- sku=registry.sku.name if registry is not None else "",
- provisioning_state=registry.provisioning_state if registry is not None else "",
- login_server=registry.login_server if registry is not None else "",
- credentials=dict(),
- tags=registry.tags if registry is not None else ""
- )
- if credentials:
- results['credentials'] = dict(
- password=credentials.passwords[0].value,
- password2=credentials.passwords[1].value
- )
-
- return results
-
-
-class Actions:
- NoAction, Create, Update = range(3)
-
-
-class AzureRMContainerRegistry(AzureRMModuleBase):
- """Configuration class for an Azure RM container registry resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- ),
- location=dict(
- type='str'
- ),
- admin_user_enabled=dict(
- type='bool',
- default=False
- ),
- sku=dict(
- type='str',
- default='Standard',
- choices=['Basic', 'Standard', 'Premium']
- )
- )
-
- self.resource_group = None
- self.name = None
- self.location = None
- self.state = None
- self.sku = None
- self.tags = None
-
- self.results = dict(changed=False, state=dict())
-
- super(AzureRMContainerRegistry, self).__init__(
- derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- resource_group = None
- response = None
- to_do = Actions.NoAction
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- # Check if the container registry instance already present in the RG
- if self.state == 'present':
- response = self.get_containerregistry()
-
- if not response:
- to_do = Actions.Create
- else:
- self.log('Results : {0}'.format(response))
- self.results.update(response)
- if response['provisioning_state'] == "Succeeded":
- to_do = Actions.NoAction
- if (self.location is not None) and self.location != response['location']:
- to_do = Actions.Update
- elif (self.sku is not None) and self.sku != response['sku']:
- to_do = Actions.Update
- else:
- to_do = Actions.NoAction
-
- self.log("Create / Update the container registry instance")
- if self.check_mode:
- return self.results
-
- self.results.update(self.create_update_containerregistry(to_do))
- if to_do != Actions.NoAction:
- self.results['changed'] = True
- else:
- self.results['changed'] = False
-
- self.log("Container registry instance created or updated")
- elif self.state == 'absent':
- if self.check_mode:
- return self.results
- self.delete_containerregistry()
- self.log("Container registry instance deleted")
-
- return self.results
-
- def create_update_containerregistry(self, to_do):
- '''
- Creates or updates a container registry.
-
- :return: deserialized container registry instance state dictionary
- '''
- self.log("Creating / Updating the container registry instance {0}".format(self.name))
-
- try:
- if to_do != Actions.NoAction:
- if to_do == Actions.Create:
- name_status = self.containerregistry_client.registries.check_name_availability(self.name)
- if name_status.name_available:
- poller = self.containerregistry_client.registries.create(
- resource_group_name=self.resource_group,
- registry_name=self.name,
- registry=Registry(
- location=self.location,
- sku=Sku(
- name=self.sku
- ),
- tags=self.tags,
- admin_user_enabled=self.admin_user_enabled
- )
- )
- else:
- raise Exception("Invalid registry name. reason: " + name_status.reason + " message: " + name_status.message)
- else:
- registry = self.containerregistry_client.registries.get(self.resource_group, self.name)
- if registry is not None:
- poller = self.containerregistry_client.registries.update(
- resource_group_name=self.resource_group,
- registry_name=self.name,
- registry_update_parameters=RegistryUpdateParameters(
- sku=Sku(
- name=self.sku
- ),
- tags=self.tags,
- admin_user_enabled=self.admin_user_enabled
- )
- )
- else:
- raise Exception("Update registry failed as registry '" + self.name + "' doesn't exist.")
- response = self.get_poller_result(poller)
- if self.admin_user_enabled:
- credentials = self.containerregistry_client.registries.list_credentials(self.resource_group, self.name)
- else:
- self.log('Cannot perform credential operations as admin user is disabled')
- credentials = None
- else:
- response = None
- credentials = None
- except (CloudError, Exception) as exc:
- self.log('Error attempting to create / update the container registry instance.')
- self.fail("Error creating / updating the container registry instance: {0}".format(str(exc)))
- return create_containerregistry_dict(response, credentials)
-
- def delete_containerregistry(self):
- '''
- Deletes the specified container registry in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the container registry instance {0}".format(self.name))
- try:
- self.containerregistry_client.registries.delete(self.resource_group, self.name).wait()
- except CloudError as e:
- self.log('Error attempting to delete the container registry instance.')
- self.fail("Error deleting the container registry instance: {0}".format(str(e)))
-
- return True
-
- def get_containerregistry(self):
- '''
- Gets the properties of the specified container registry.
-
- :return: deserialized container registry state dictionary
- '''
- self.log("Checking if the container registry instance {0} is present".format(self.name))
- found = False
- try:
- response = self.containerregistry_client.registries.get(self.resource_group, self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("Container registry instance : {0} found".format(response.name))
- except CloudError as e:
- if e.error.error == 'ResourceNotFound':
- self.log('Did not find the container registry instance: {0}'.format(str(e)))
- else:
- self.fail('Error while trying to get container registry instance: {0}'.format(str(e)))
- response = None
- if found is True and self.admin_user_enabled is True:
- try:
- credentials = self.containerregistry_client.registries.list_credentials(self.resource_group, self.name)
- except CloudError as e:
- self.fail('List registry credentials failed: {0}'.format(str(e)))
- credentials = None
- elif found is True and self.admin_user_enabled is False:
- credentials = None
- else:
- return None
- return create_containerregistry_dict(response, credentials)
-
-
-def main():
- """Main execution"""
- AzureRMContainerRegistry()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_containerregistry_info.py b/lib/ansible/modules/cloud/azure/azure_rm_containerregistry_info.py
deleted file mode 100644
index 936fc23012..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_containerregistry_info.py
+++ /dev/null
@@ -1,283 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_containerregistry_info
-version_added: "2.9"
-short_description: Get Azure Container Registry facts
-description:
- - Get facts for Container Registry.
-
-options:
- resource_group:
- description:
- - The name of the resource group to which the container registry belongs.
- required: True
- name:
- description:
- - The name of the container registry.
- retrieve_credentials:
- description:
- - Retrieve credentials for container registry.
- type: bool
- default: no
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of Registry
- azure_rm_containerregistry_info:
- resource_group: myResourceGroup
- name: myRegistry
-
- - name: List instances of Registry
- azure_rm_containerregistry_info:
- resource_group: myResourceGroup
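-
- # Hypothetical sketch (not part of the original examples): I(retrieve_credentials) is
- # documented above but not shown; credentials are only returned when the admin user is enabled.
- - name: Get instance of Registry including admin credentials
-   azure_rm_containerregistry_info:
-     resource_group: myResourceGroup
-     name: myRegistry
-     retrieve_credentials: yes
-   register: acr_facts
-
- - name: Show the registry login server
-   debug:
-     msg: "{{ acr_facts.registries[0].login_server }}"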
-'''
-
-RETURN = '''
-registries:
- description:
- - A list of dictionaries containing facts for registries.
- returned: always
- type: complex
- contains:
- id:
- description:
- - The resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerRegistry/registr
- ies/myRegistry"
- name:
- description:
- - The name of the resource.
- returned: always
- type: str
- sample: myRegistry
- location:
- description:
- - The location of the resource. This cannot be changed after the resource is created.
- returned: always
- type: str
- sample: westus
- admin_user_enabled:
- description:
- - Is admin user enabled.
- returned: always
- type: bool
- sample: yes
- sku:
- description:
- - The SKU name of the container registry.
- returned: always
- type: str
- sample: Premium
- provisioning_state:
- description:
- - Provisioning state of the container registry.
- returned: always
- type: str
- sample: Succeeded
- login_server:
- description:
- - Login server for the registry.
- returned: always
- type: str
- sample: acrd08521b.azurecr.io
- credentials:
- description:
- - Credentials; fields will be empty if the admin user is not enabled for the ACR.
- returned: when C(retrieve_credentials) is set and C(admin_user_enabled) is set on ACR
- type: complex
- contains:
- username:
- description:
- - The user name for container registry.
- returned: when registry exists and C(admin_user_enabled) is set
- type: str
- sample: zim
- password:
- description:
- - password value.
- returned: when registry exists and C(admin_user_enabled) is set
- type: str
- sample: pass1value
- password2:
- description:
- - password2 value.
- returned: when registry exists and C(admin_user_enabled) is set
- type: str
- sample: pass2value
- tags:
- description:
- - Tags assigned to the resource. Dictionary of string:string pairs.
- type: dict
- sample: { "tag1": "abc" }
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.azure_operation import AzureOperationPoller
- from azure.mgmt.containerregistry import ContainerRegistryManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMContainerRegistryInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- ),
- retrieve_credentials=dict(
- type='bool',
- default=False
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.resource_group = None
- self.name = None
- self.retrieve_credentials = False
-
- super(AzureRMContainerRegistryInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_containerregistry_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_containerregistry_facts' module has been renamed to 'azure_rm_containerregistry_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name:
- self.results['registries'] = self.get()
- elif self.resource_group:
- self.results['registries'] = self.list_by_resource_group()
- else:
- self.results['registries'] = self.list_all()
-
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.containerregistry_client.registries.get(resource_group_name=self.resource_group,
- registry_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Registries.')
-
- if response is not None:
- if self.has_tags(response.tags, self.tags):
- results.append(self.format_item(response))
-
- return results
-
- def list_all(self):
- response = None
- results = []
- try:
- response = self.containerregistry_client.registries.list()
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail('Could not get facts for Registries.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_item(item))
- return results
-
- def list_by_resource_group(self):
- response = None
- results = []
- try:
- response = self.containerregistry_client.registries.list_by_resource_group(resource_group_name=self.resource_group)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail('Could not get facts for Registries.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_item(item))
- return results
-
- def format_item(self, item):
- d = item.as_dict()
- resource_group = d['id'].split('resourceGroups/')[1].split('/')[0]
- name = d['name']
- credentials = {}
- admin_user_enabled = d['admin_user_enabled']
-
- if self.retrieve_credentials and admin_user_enabled:
- credentials = self.containerregistry_client.registries.list_credentials(resource_group, name).as_dict()
- for index in range(len(credentials['passwords'])):
- password = credentials['passwords'][index]
- if password['name'] == 'password':
- credentials['password'] = password['value']
- elif password['name'] == 'password2':
- credentials['password2'] = password['value']
- credentials.pop('passwords')
-
- d = {
- 'resource_group': resource_group,
- 'name': d['name'],
- 'location': d['location'],
- 'admin_user_enabled': admin_user_enabled,
- 'sku': d['sku']['tier'].lower(),
- 'provisioning_state': d['provisioning_state'],
- 'login_server': d['login_server'],
- 'id': d['id'],
- 'tags': d.get('tags', None),
- 'credentials': credentials
- }
- return d
-
-
-def main():
- AzureRMContainerRegistryInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount.py b/lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount.py
deleted file mode 100644
index a6bb1998a8..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount.py
+++ /dev/null
@@ -1,587 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_cosmosdbaccount
-version_added: "2.8"
-short_description: Manage Azure Database Account instance
-description:
- - Create, update and delete instance of Azure Database Account.
-
-options:
- resource_group:
- description:
- - Name of an Azure resource group.
- required: True
- name:
- description:
- - Cosmos DB database account name.
- required: True
- location:
- description:
- - The location of the resource group to which the resource belongs.
- - Required when I(state=present).
- kind:
- description:
- - Indicates the type of database account. This can only be set at database account creation.
- choices:
- - 'global_document_db'
- - 'mongo_db'
- - 'parse'
- consistency_policy:
- description:
- - The consistency policy for the Cosmos DB account.
- suboptions:
- default_consistency_level:
- description:
- - The default consistency level and configuration settings of the Cosmos DB account.
- - Required when I(state=present).
- choices:
- - 'eventual'
- - 'session'
- - 'bounded_staleness'
- - 'strong'
- - 'consistent_prefix'
- max_staleness_prefix:
- description:
- - When used with the Bounded Staleness consistency level, this value represents the number of stale requests tolerated.
- - Accepted range for this value is 1 - 2,147,483,647. Required when I(default_consistency_level=bounded_staleness).
- type: int
- max_interval_in_seconds:
- description:
- - When used with the Bounded Staleness consistency level, this value represents the time amount of staleness (in seconds) tolerated.
- - Accepted range for this value is 5 - 86400. Required when I(default_consistency_level=bounded_staleness).
- type: int
- geo_rep_locations:
- description:
- - An array that contains the georeplication locations enabled for the Cosmos DB account.
- - Required when I(state=present).
- type: list
- suboptions:
- name:
- description:
- - The name of the region.
- failover_priority:
- description:
- - The failover priority of the region. A failover priority of 0 indicates a write region.
- - The maximum value for a failover priority = (total number of regions - 1).
- - Failover priority values must be unique for each of the regions in which the database account exists.
- type: int
- database_account_offer_type:
- description:
- - Database account offer type, for example C(Standard).
- - Required when I(state=present).
- ip_range_filter:
- description:
- - Cosmos DB firewall support. This value specifies the set of IP addresses or IP address ranges, in CIDR form, to be included as the allowed list of client IPs for a given database account.
- - IP addresses/ranges must be comma-separated and must not contain any spaces.
- is_virtual_network_filter_enabled:
- description:
- - Flag to indicate whether to enable/disable Virtual Network ACL rules.
- type: bool
- enable_automatic_failover:
- description:
- - Enables automatic failover of the write region in the rare event that the region is unavailable due to an outage.
- - Automatic failover will result in a new write region for the account and is chosen based on the failover priorities configured for the account.
- type: bool
- enable_cassandra:
- description:
- - Enable Cassandra.
- type: bool
- enable_table:
- description:
- - Enable Table.
- type: bool
- enable_gremlin:
- description:
- - Enable Gremlin.
- type: bool
- virtual_network_rules:
- description:
- - List of Virtual Network ACL rules configured for the Cosmos DB account.
- type: list
- suboptions:
- subnet:
- description:
- - It can be a string containing the resource ID of a subnet.
- - It can be a dictionary containing 'resource_group', 'virtual_network_name' and 'subnet_name'.
- ignore_missing_vnet_service_endpoint:
- description:
- - Create Cosmos DB account without existing virtual network service endpoint.
- type: bool
-
- enable_multiple_write_locations:
- description:
- - Enables the account to write in multiple locations.
- type: bool
- state:
- description:
- - Assert the state of the Database Account.
- - Use C(present) to create or update a Database Account and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Create Cosmos DB Account - min
- azure_rm_cosmosdbaccount:
- resource_group: myResourceGroup
- name: myDatabaseAccount
- location: westus
- geo_rep_locations:
- - name: southcentralus
- failover_priority: 0
- database_account_offer_type: Standard
-
- - name: Create Cosmos DB Account - max
- azure_rm_cosmosdbaccount:
- resource_group: myResourceGroup
- name: myDatabaseAccount
- location: westus
- kind: mongo_db
- geo_rep_locations:
- - name: southcentralus
- failover_priority: 0
- database_account_offer_type: Standard
- ip_range_filter: 10.10.10.10
- enable_multiple_write_locations: yes
- virtual_network_rules:
- - subnet: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVi
- rtualNetwork/subnets/mySubnet"
- consistency_policy:
- default_consistency_level: bounded_staleness
- max_staleness_prefix: 10
- max_interval_in_seconds: 1000
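-
- # Hypothetical sketch (not part of the original examples): deleting an account uses the
- # documented I(state=absent); the values are illustrative.
- - name: Delete Cosmos DB Account
-   azure_rm_cosmosdbaccount:
-     resource_group: myResourceGroup
-     name: myDatabaseAccount
-     state: absent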
-'''
-
-RETURN = '''
-id:
- description:
- - The unique resource identifier of the database account.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DocumentDB/databaseAccounts/myData
- baseAccount"
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _snake_to_camel
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrestazure.azure_operation import AzureOperationPoller
- from azure.mgmt.cosmosdb import CosmosDB
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMCosmosDBAccount(AzureRMModuleBase):
- """Configuration class for an Azure RM Database Account resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- kind=dict(
- type='str',
- choices=['global_document_db',
- 'mongo_db',
- 'parse']
- ),
- consistency_policy=dict(
- type='dict',
- options=dict(
- default_consistency_level=dict(
- type='str',
- choices=['eventual',
- 'session',
- 'bounded_staleness',
- 'strong',
- 'consistent_prefix']
- ),
- max_staleness_prefix=dict(
- type='int'
- ),
- max_interval_in_seconds=dict(
- type='int'
- )
- )
- ),
- geo_rep_locations=dict(
- type='list',
- options=dict(
- name=dict(
- type='str',
- required=True
- ),
- failover_priority=dict(
- type='int',
- required=True
- )
- )
- ),
- database_account_offer_type=dict(
- type='str'
- ),
- ip_range_filter=dict(
- type='str'
- ),
- is_virtual_network_filter_enabled=dict(
- type='bool'
- ),
- enable_automatic_failover=dict(
- type='bool'
- ),
- enable_cassandra=dict(
- type='bool'
- ),
- enable_table=dict(
- type='bool'
- ),
- enable_gremlin=dict(
- type='bool'
- ),
- virtual_network_rules=dict(
- type='list',
- options=dict(
- subnet=dict(
- type='raw',
- required=True
- ),
- ignore_missing_vnet_service_endpoint=dict(
- type='bool'
- )
- )
- ),
- enable_multiple_write_locations=dict(
- type='bool'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.name = None
- self.parameters = dict()
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMCosmosDBAccount, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
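- # Split the incoming module options: keys that are attributes of this class stay on the
- # instance, everything else is collected into self.parameters (the API request body).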
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- self.parameters[key] = kwargs[key]
-
- kind = self.parameters.get('kind')
- if kind == 'global_document_db':
- self.parameters['kind'] = 'GlobalDocumentDB'
- elif kind == 'mongo_db':
- self.parameters['kind'] = 'MongoDB'
- elif kind == 'parse':
- self.parameters['kind'] = 'Parse'
-
- dict_camelize(self.parameters, ['consistency_policy', 'default_consistency_level'], True)
- dict_rename(self.parameters, ['geo_rep_locations', 'name'], 'location_name')
- dict_rename(self.parameters, ['geo_rep_locations'], 'locations')
- self.parameters['capabilities'] = []
- if self.parameters.pop('enable_cassandra', False):
- self.parameters['capabilities'].append({'name': 'EnableCassandra'})
- if self.parameters.pop('enable_table', False):
- self.parameters['capabilities'].append({'name': 'EnableTable'})
- if self.parameters.pop('enable_gremlin', False):
- self.parameters['capabilities'].append({'name': 'EnableGremlin'})
-
- for rule in self.parameters.get('virtual_network_rules', []):
- subnet = rule.pop('subnet')
- if isinstance(subnet, dict):
- virtual_network_name = subnet.get('virtual_network_name')
- subnet_name = subnet.get('subnet_name')
- resource_group_name = subnet.get('resource_group', self.resource_group)
- template = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/virtualNetworks/{2}/subnets/{3}"
- subnet = template.format(self.subscription_id, resource_group_name, virtual_network_name, subnet_name)
- rule['id'] = subnet
-
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(CosmosDB,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- resource_group = self.get_resource_group(self.resource_group)
-
- if "location" not in self.parameters:
- self.parameters["location"] = resource_group.location
-
- old_response = self.get_databaseaccount()
-
- if not old_response:
- self.log("Database Account instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("Database Account instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- old_response['locations'] = old_response['failover_policies']
- if not default_compare(self.parameters, old_response, '', self.results):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the Database Account instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_databaseaccount()
-
- self.results['changed'] = True
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("Database Account instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_databaseaccount()
- else:
- self.log("Database Account instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if self.state == 'present':
- self.results.update({'id': response.get('id', None)})
- return self.results
-
- def create_update_databaseaccount(self):
- '''
- Creates or updates Database Account with the specified configuration.
-
- :return: deserialized Database Account instance state dictionary
- '''
- self.log("Creating / Updating the Database Account instance {0}".format(self.name))
-
- try:
- response = self.mgmt_client.database_accounts.create_or_update(resource_group_name=self.resource_group,
- account_name=self.name,
- create_update_parameters=self.parameters)
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the Database Account instance.')
- self.fail("Error creating the Database Account instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_databaseaccount(self):
- '''
- Deletes specified Database Account instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the Database Account instance {0}".format(self.name))
- try:
- response = self.mgmt_client.database_accounts.delete(resource_group_name=self.resource_group,
- account_name=self.name)
-
- # This currently doesn't work as there is a bug in SDK / Service
- # if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- # response = self.get_poller_result(response)
- except CloudError as e:
- self.log('Error attempting to delete the Database Account instance.')
- self.fail("Error deleting the Database Account instance: {0}".format(str(e)))
-
- return True
-
- def get_databaseaccount(self):
- '''
- Gets the properties of the specified Database Account.
-
- :return: deserialized Database Account instance state dictionary
- '''
- self.log("Checking if the Database Account instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mgmt_client.database_accounts.get(resource_group_name=self.resource_group,
- account_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("Database Account instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the Database Account instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def default_compare(new, old, path, result):
- if new is None:
- return True
- elif isinstance(new, dict):
- if not isinstance(old, dict):
- result['compare'] = 'changed [' + path + '] old dict is null'
- return False
- for k in new.keys():
- if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
- return False
- return True
- elif isinstance(new, list):
- if not isinstance(old, list) or len(new) != len(old):
- result['compare'] = 'changed [' + path + '] length is different or null'
- return False
- elif len(old) == 0:
- return True
- elif isinstance(old[0], dict):
- key = None
- if 'id' in old[0] and 'id' in new[0]:
- key = 'id'
- elif 'name' in old[0] and 'name' in new[0]:
- key = 'name'
- else:
- key = list(old[0])[0]
- new = sorted(new, key=lambda x: x.get(key, ''))
- old = sorted(old, key=lambda x: x.get(key, ''))
- else:
- new = sorted(new)
- old = sorted(old)
- for i in range(len(new)):
- if not default_compare(new[i], old[i], path + '/*', result):
- return False
- return True
- else:
- if path == '/location' or path.endswith('location_name'):
- new = new.replace(' ', '').lower()
- old = old.replace(' ', '').lower()
- if new == old:
- return True
- else:
- result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
- return False
-
-
-def dict_camelize(d, path, camelize_first):
- if isinstance(d, list):
- for i in range(len(d)):
- dict_camelize(d[i], path, camelize_first)
- elif isinstance(d, dict):
- if len(path) == 1:
- old_value = d.get(path[0], None)
- if old_value is not None:
- d[path[0]] = _snake_to_camel(old_value, camelize_first)
- else:
- sd = d.get(path[0], None)
- if sd is not None:
- dict_camelize(sd, path[1:], camelize_first)
-
-
-def dict_upper(d, path):
- if isinstance(d, list):
- for i in range(len(d)):
- dict_upper(d[i], path)
- elif isinstance(d, dict):
- if len(path) == 1:
- old_value = d.get(path[0], None)
- if old_value is not None:
- d[path[0]] = old_value.upper()
- else:
- sd = d.get(path[0], None)
- if sd is not None:
- dict_upper(sd, path[1:])
-
-
-def dict_rename(d, path, new_name):
- if isinstance(d, list):
- for i in range(len(d)):
- dict_rename(d[i], path, new_name)
- elif isinstance(d, dict):
- if len(path) == 1:
- old_value = d.pop(path[0], None)
- if old_value is not None:
- d[new_name] = old_value
- else:
- sd = d.get(path[0], None)
- if sd is not None:
- dict_rename(sd, path[1:], new_name)
-
-
-def dict_expand(d, path, outer_dict_name):
- if isinstance(d, list):
- for i in range(len(d)):
- dict_expand(d[i], path, outer_dict_name)
- elif isinstance(d, dict):
- if len(path) == 1:
- old_value = d.pop(path[0], None)
- if old_value is not None:
- d[outer_dict_name] = d.get(outer_dict_name, {})
- d[outer_dict_name][path[0]] = old_value
- else:
- sd = d.get(path[0], None)
- if sd is not None:
- dict_expand(sd, path[1:], outer_dict_name)
-
-
-def main():
- """Main execution"""
- AzureRMCosmosDBAccount()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount_info.py b/lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount_info.py
deleted file mode 100644
index f247e91fb6..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount_info.py
+++ /dev/null
@@ -1,520 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_cosmosdbaccount_info
-version_added: "2.9"
-short_description: Get Azure Cosmos DB Account facts
-description:
- - Get facts of Azure Cosmos DB Account.
-
-options:
- resource_group:
- description:
- - Name of an Azure resource group.
- name:
- description:
- - Cosmos DB database account name.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- retrieve_keys:
- description:
- - Retrieve keys and connection strings.
- type: str
- choices:
- - all
- - readonly
- retrieve_connection_strings:
- description:
- - Retrieve connection strings.
- type: bool
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of Database Account
- azure_rm_cosmosdbaccount_info:
- resource_group: myResourceGroup
- name: testaccount
-
- - name: List instances of Database Account
- azure_rm_cosmosdbaccount_info:
- resource_group: myResourceGroup
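-
-  # Illustrative sketch, not from the original module documentation: keys and
-  # connection strings are only returned when explicitly requested.
-  - name: Get instance of Database Account with keys and connection strings
-    azure_rm_cosmosdbaccount_info:
-      resource_group: myResourceGroup
-      name: testaccount
-      retrieve_keys: all
-      retrieve_connection_strings: true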
-'''
-
-RETURN = '''
-accounts:
- description: A list of dictionaries containing facts for Database Account.
- returned: always
- type: complex
- contains:
- id:
- description:
- - The unique resource identifier of the database account.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DocumentDB/databaseAccount
- s/testaccount"
- resource_group:
- description:
- - Name of an Azure resource group.
- returned: always
- type: str
- sample: myResourceGroup
- name:
- description:
- - The name of the database account.
- returned: always
- type: str
- sample: testaccount
- location:
- description:
- - The location of the resource group to which the resource belongs.
- returned: always
- type: str
- sample: westus
- kind:
- description:
- - Indicates the type of database account.
- returned: always
- type: str
- sample: global_document_db
- consistency_policy:
- description:
- - Consistency policy.
- returned: always
- type: complex
- contains:
- default_consistency_level:
- description:
- - Default consistency level.
- returned: always
- type: str
- sample: session
- max_interval_in_seconds:
- description:
- - Maximum interval in seconds.
- returned: always
- type: int
- sample: 5
- max_staleness_prefix:
- description:
- - Maximum staleness prefix.
- returned: always
- type: int
- sample: 100
- failover_policies:
- description:
- - The list of new failover policies for the failover priority change.
- returned: always
- type: complex
- contains:
- name:
- description:
- - Location name.
- returned: always
- type: str
- sample: eastus
- failover_priority:
- description:
- - Failover priority.
- returned: always
- type: int
- sample: 0
- id:
- description:
- - Failover policy ID.
- returned: always
- type: str
- sample: testaccount-eastus
- read_locations:
- description:
- - Read locations.
- returned: always
- type: complex
- contains:
- name:
- description:
- - Location name.
- returned: always
- type: str
- sample: eastus
- failover_priority:
- description:
- - Failover priority.
- returned: always
- type: int
- sample: 0
- id:
- description:
- - Read location ID.
- returned: always
- type: str
- sample: testaccount-eastus
- document_endpoint:
- description:
- - Document endpoint.
- returned: always
- type: str
- sample: https://testaccount-eastus.documents.azure.com:443/
- provisioning_state:
- description:
- - Provisioning state.
- returned: always
- type: str
- sample: Succeeded
- write_locations:
- description:
- - Write locations.
- returned: always
- type: complex
- contains:
- name:
- description:
- - Location name.
- returned: always
- type: str
- sample: eastus
- failover_priority:
- description:
- - Failover priority.
- returned: always
- type: int
- sample: 0
- id:
- description:
- - Write location ID.
- returned: always
- type: str
- sample: testaccount-eastus
- document_endpoint:
- description:
- - Document endpoint.
- returned: always
- type: str
- sample: https://testaccount-eastus.documents.azure.com:443/
- provisioning_state:
- description:
- - Provisioning state.
- returned: always
- type: str
- sample: Succeeded
- database_account_offer_type:
- description:
- - Offer type.
- returned: always
- type: str
- sample: Standard
- ip_range_filter:
- description:
- - IP range filter (allowed client IP addresses).
- returned: always
- type: str
- sample: 10.10.10.10
- is_virtual_network_filter_enabled:
- description:
- - Enable virtual network filter.
- returned: always
- type: bool
- sample: true
- enable_automatic_failover:
- description:
- - Enable automatic failover.
- returned: always
- type: bool
- sample: true
- enable_cassandra:
- description:
- - Enable Cassandra.
- returned: always
- type: bool
- sample: true
- enable_table:
- description:
- - Enable Table.
- returned: always
- type: bool
- sample: true
- enable_gremlin:
- description:
- - Enable Gremlin.
- returned: always
- type: bool
- sample: true
- virtual_network_rules:
- description:
- - List of Virtual Network ACL rules configured for the Cosmos DB account.
- type: list
- contains:
- subnet:
- description:
- - Resource id of a subnet.
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNet
- works/testvnet/subnets/testsubnet1"
- ignore_missing_vnet_service_endpoint:
- description:
- - Create Cosmos DB account without existing virtual network service endpoint.
- type: bool
- enable_multiple_write_locations:
- description:
- - Enable multiple write locations.
- returned: always
- type: bool
- sample: true
- document_endpoint:
- description:
- - Document endpoint.
- returned: always
- type: str
- sample: https://testaccount.documents.azure.com:443/
- provisioning_state:
- description:
- - Provisioning state of Cosmos DB.
- returned: always
- type: str
- sample: Succeeded
- primary_master_key:
- description:
- - Primary master key.
- returned: when requested
- type: str
- sample: UIWoYD4YaD4LxW6k3Jy69qcHDMLX4aSttECQkEcwWF1RflLd6crWSGJs0R9kJwujehtfLGeQx4ISVSJfTpJkYw==
- secondary_master_key:
- description:
- - Secondary master key.
- returned: when requested
- type: str
- sample: UIWoYD4YaD4LxW6k3Jy69qcHDMLX4aSttECQkEcwWF1RflLd6crWSGJs0R9kJwujehtfLGeQx4ISVSJfTpJkYw==
- primary_readonly_master_key:
- description:
- - Primary read-only master key.
- returned: when requested
- type: str
- sample: UIWoYD4YaD4LxW6k3Jy69qcHDMLX4aSttECQkEcwWF1RflLd6crWSGJs0R9kJwujehtfLGeQx4ISVSJfTpJkYw==
- secondary_readonly_master_key:
- description:
- - Secondary read-only master key.
- returned: when requested
- type: str
- sample: UIWoYD4YaD4LxW6k3Jy69qcHDMLX4aSttECQkEcwWF1RflLd6crWSGJs0R9kJwujehtfLGeQx4ISVSJfTpJkYw==
- connection_strings:
- description:
- - List of connection strings.
- type: list
- returned: when requested
- contains:
- connection_string:
- description:
- - Connection string.
- type: str
- returned: always
- sample: "AccountEndpoint=https://testaccount.documents.azure.com:443/;AccountKey=fSEjathnk6ZeBTrXkud9j5kfhtSEQ
- q3dpJxJga76h9BZkK2BJJrDzSO6DDn6yKads017OZBZ1YZWyq1cW4iuvA=="
- description:
- description:
- - Description of connection string.
- type: str
- returned: always
- sample: Primary SQL Connection String
- tags:
- description:
- - Tags assigned to the resource. Dictionary of "string":"string" pairs.
- returned: always
- type: dict
- sample: { "tag1":"abc" }
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _camel_to_snake
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.cosmosdb import CosmosDB
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMCosmosDBAccountInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str'
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- ),
- retrieve_keys=dict(
- type='str',
- choices=['all', 'readonly']
- ),
- retrieve_connection_strings=dict(
- type='bool'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.name = None
- self.tags = None
- self.retrieve_keys = None
- self.retrieve_connection_strings = None
-
- super(AzureRMCosmosDBAccountInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_cosmosdbaccount_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_cosmosdbaccount_facts' module has been renamed to 'azure_rm_cosmosdbaccount_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(CosmosDB,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if self.name is not None:
- self.results['accounts'] = self.get()
- elif self.resource_group is not None:
- self.results['accounts'] = self.list_by_resource_group()
- else:
- self.results['accounts'] = self.list_all()
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.database_accounts.get(resource_group_name=self.resource_group,
- account_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Database Account.')
-
- if response and self.has_tags(response.tags, self.tags):
- results.append(self.format_response(response))
-
- return results
-
- def list_by_resource_group(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.database_accounts.list_by_resource_group(resource_group_name=self.resource_group)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Database Account.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_response(item))
-
- return results
-
- def list_all(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.database_accounts.list()
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Database Account.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_response(item))
-
- return results
-
- def format_response(self, item):
- d = item.as_dict()
- d = {
- 'id': d.get('id'),
- 'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'),
- 'name': d.get('name', None),
- 'location': d.get('location', '').replace(' ', '').lower(),
- 'kind': _camel_to_snake(d.get('kind', None)),
- 'consistency_policy': {'default_consistency_level': _camel_to_snake(d['consistency_policy']['default_consistency_level']),
- 'max_interval_in_seconds': d['consistency_policy']['max_interval_in_seconds'],
- 'max_staleness_prefix': d['consistency_policy']['max_staleness_prefix']},
- 'failover_policies': [{'name': fp['location_name'].replace(' ', '').lower(),
- 'failover_priority': fp['failover_priority'],
- 'id': fp['id']} for fp in d['failover_policies']],
- 'read_locations': [{'name': rl['location_name'].replace(' ', '').lower(),
- 'failover_priority': rl['failover_priority'],
- 'id': rl['id'],
- 'document_endpoint': rl['document_endpoint'],
- 'provisioning_state': rl['provisioning_state']} for rl in d['read_locations']],
- 'write_locations': [{'name': wl['location_name'].replace(' ', '').lower(),
- 'failover_priority': wl['failover_priority'],
- 'id': wl['id'],
- 'document_endpoint': wl['document_endpoint'],
- 'provisioning_state': wl['provisioning_state']} for wl in d['write_locations']],
- 'database_account_offer_type': d.get('database_account_offer_type'),
- 'ip_range_filter': d['ip_range_filter'],
- 'is_virtual_network_filter_enabled': d.get('is_virtual_network_filter_enabled'),
- 'enable_automatic_failover': d.get('enable_automatic_failover'),
- 'enable_cassandra': 'EnableCassandra' in [c.get('name') for c in d.get('capabilities', [])],
- 'enable_table': 'EnableTable' in [c.get('name') for c in d.get('capabilities', [])],
- 'enable_gremlin': 'EnableGremlin' in [c.get('name') for c in d.get('capabilities', [])],
- 'virtual_network_rules': d.get('virtual_network_rules'),
- 'enable_multiple_write_locations': d.get('enable_multiple_write_locations'),
- 'document_endpoint': d.get('document_endpoint'),
- 'provisioning_state': d.get('provisioning_state'),
- 'tags': d.get('tags', None)
- }
-
- if self.retrieve_keys == 'all':
- keys = self.mgmt_client.database_accounts.list_keys(resource_group_name=self.resource_group,
- account_name=self.name)
- d['primary_master_key'] = keys.primary_master_key
- d['secondary_master_key'] = keys.secondary_master_key
- d['primary_readonly_master_key'] = keys.primary_readonly_master_key
- d['secondary_readonly_master_key'] = keys.secondary_readonly_master_key
- elif self.retrieve_keys == 'readonly':
- keys = self.mgmt_client.database_accounts.get_read_only_keys(resource_group_name=self.resource_group,
- account_name=self.name)
- d['primary_readonly_master_key'] = keys.primary_readonly_master_key
- d['secondary_readonly_master_key'] = keys.secondary_readonly_master_key
- if self.retrieve_connection_strings:
- connection_strings = self.mgmt_client.database_accounts.list_connection_strings(resource_group_name=self.resource_group,
- account_name=self.name)
- d['connection_strings'] = connection_strings.as_dict()
- return d
-
-
-def main():
- AzureRMCosmosDBAccountInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_deployment.py b/lib/ansible/modules/cloud/azure/azure_rm_deployment.py
deleted file mode 100644
index 93f4103d4c..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_deployment.py
+++ /dev/null
@@ -1,702 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_deployment
-
-short_description: Create or destroy Azure Resource Manager template deployments
-
-version_added: "2.1"
-
-description:
- - Create or destroy Azure Resource Manager template deployments via the Azure SDK for Python.
- - You can find some quick start templates on GitHub at U(https://github.com/azure/azure-quickstart-templates).
- - For more information on Azure Resource Manager templates see U(https://azure.microsoft.com/en-us/documentation/articles/resource-group-template-deploy/).
-
-options:
- resource_group:
- description:
- - The resource group name to use or create to host the deployed template.
- required: true
- aliases:
- - resource_group_name
- name:
- description:
- - The name of the deployment to be tracked in the resource group deployment history.
- - Re-using a deployment name will overwrite the previous value in the resource group's deployment history.
- default: ansible-arm
- aliases:
- - deployment_name
- location:
- description:
- - The geo-locations in which the resource group will be located.
- default: westus
- deployment_mode:
- description:
- - In incremental mode, resources are deployed without deleting existing resources that are not included in the template.
- - In complete mode resources are deployed and existing resources in the resource group not included in the template are deleted.
- default: incremental
- choices:
- - complete
- - incremental
- template:
- description:
- - A hash containing the templates inline. This parameter is mutually exclusive with I(template_link).
- - Either I(template) or I(template_link) is required if I(state=present).
- type: dict
- template_link:
- description:
- - Uri of file containing the template body. This parameter is mutually exclusive with I(template).
- - Either I(template) or I(template_link) is required if I(state=present).
- parameters:
- description:
- - A hash of all the required template variables for the deployment template. This parameter is mutually exclusive with I(parameters_link).
- - Either I(parameters_link) or I(parameters) is required if I(state=present).
- type: dict
- parameters_link:
- description:
- - Uri of file containing the parameters body. This parameter is mutually exclusive with I(parameters).
- - Either I(parameters_link) or I(parameters) is required if I(state=present).
- wait_for_deployment_completion:
- description:
- - Whether or not to block until the deployment has completed.
- type: bool
- default: 'yes'
- wait_for_deployment_polling_period:
- description:
- - Time (in seconds) to wait between polls when waiting for deployment completion.
- default: 10
- state:
- description:
- - If I(state=present), template will be created.
- - If I(state=present) and deployment exists, it will be updated.
- - If I(state=absent), stack will be removed.
- default: present
- choices:
- - present
- - absent
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - David Justice (@devigned)
- - Laurent Mazuel (@lmazuel)
- - Andre Price (@obsoleted)
-
-'''
-
-EXAMPLES = '''
-# Destroy a template deployment
-- name: Destroy Azure Deploy
- azure_rm_deployment:
- resource_group: myResourceGroup
- name: myDeployment
- state: absent
-
-# Create or update a template deployment based on uris using parameter and template links
-- name: Create Azure Deploy
- azure_rm_deployment:
- resource_group: myResourceGroup
- name: myDeployment
- template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.json'
- parameters_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.parameters.json'
-
-# Create or update a template deployment based on a uri to the template and parameters specified inline.
-# This deploys a VM with SSH support for a given public key, then stores the result in 'azure_vms'. The result is then
-# used to create a new host group. This host group is then used to wait for each instance to respond to SSH on its public IP.
----
-- name: Create Azure Deploy
- azure_rm_deployment:
- resource_group: myResourceGroup
- name: myDeployment
- parameters:
- newStorageAccountName:
- value: devopsclestorage1
- adminUsername:
- value: devopscle
- dnsNameForPublicIP:
- value: devopscleazure
- location:
- value: West US
- vmSize:
- value: Standard_A2
- vmName:
- value: ansibleSshVm
- sshKeyData:
- value: YOUR_SSH_PUBLIC_KEY
- template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-sshkey/azuredeploy.json'
- register: azure
-- name: Add new instance to host group
- add_host:
- hostname: "{{ item['ips'][0].public_ip }}"
- groupname: azure_vms
- loop: "{{ azure.deployment.instances }}"
-
-# Deploy an Azure WebApp running a hello world'ish node app
-- name: Create Azure WebApp Deployment at http://devopscleweb.azurewebsites.net/hello.js
- azure_rm_deployment:
- resource_group: myResourceGroup
- name: myDeployment
- parameters:
- repoURL:
- value: 'https://github.com/devigned/az-roadshow-oss.git'
- siteName:
- value: devopscleweb
- hostingPlanName:
- value: someplan
- siteLocation:
- value: westus
- sku:
- value: Standard
- template_link: 'https://raw.githubusercontent.com/azure/azure-quickstart-templates/master/201-web-app-github-deploy/azuredeploy.json'
-
-# Create or update a template deployment based on an inline template and parameters
-- name: Create Azure Deploy
- azure_rm_deployment:
- resource_group: myResourceGroup
- name: myDeployment
- template:
- $schema: "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#"
- contentVersion: "1.0.0.0"
- parameters:
- newStorageAccountName:
- type: "string"
- metadata:
- description: "Unique DNS Name for the Storage Account where the Virtual Machine's disks will be placed."
- adminUsername:
- type: "string"
- metadata:
- description: "User name for the Virtual Machine."
- adminPassword:
- type: "securestring"
- metadata:
- description: "Password for the Virtual Machine."
- dnsNameForPublicIP:
- type: "string"
- metadata:
- description: "Unique DNS Name for the Public IP used to access the Virtual Machine."
- ubuntuOSVersion:
- type: "string"
- defaultValue: "14.04.2-LTS"
- allowedValues:
- - "12.04.5-LTS"
- - "14.04.2-LTS"
- - "15.04"
- metadata:
- description: >
- The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version.
- Allowed values: 12.04.5-LTS, 14.04.2-LTS, 15.04.
- variables:
- location: "West US"
- imagePublisher: "Canonical"
- imageOffer: "UbuntuServer"
- OSDiskName: "osdiskforlinuxsimple"
- nicName: "myVMNic"
- addressPrefix: "192.0.2.0/24"
- subnetName: "Subnet"
- subnetPrefix: "192.0.2.0/24"
- storageAccountType: "Standard_LRS"
- publicIPAddressName: "myPublicIP"
- publicIPAddressType: "Dynamic"
- vmStorageAccountContainerName: "vhds"
- vmName: "MyUbuntuVM"
- vmSize: "Standard_D1"
- virtualNetworkName: "MyVNET"
- vnetID: "[resourceId('Microsoft.Network/virtualNetworks',variables('virtualNetworkName'))]"
- subnetRef: "[concat(variables('vnetID'),'/subnets/',variables('subnetName'))]"
- resources:
- - type: "Microsoft.Storage/storageAccounts"
- name: "[parameters('newStorageAccountName')]"
- apiVersion: "2015-05-01-preview"
- location: "[variables('location')]"
- properties:
- accountType: "[variables('storageAccountType')]"
- - apiVersion: "2015-05-01-preview"
- type: "Microsoft.Network/publicIPAddresses"
- name: "[variables('publicIPAddressName')]"
- location: "[variables('location')]"
- properties:
- publicIPAllocationMethod: "[variables('publicIPAddressType')]"
- dnsSettings:
- domainNameLabel: "[parameters('dnsNameForPublicIP')]"
- - type: "Microsoft.Network/virtualNetworks"
- apiVersion: "2015-05-01-preview"
- name: "[variables('virtualNetworkName')]"
- location: "[variables('location')]"
- properties:
- addressSpace:
- addressPrefixes:
- - "[variables('addressPrefix')]"
- subnets:
- -
- name: "[variables('subnetName')]"
- properties:
- addressPrefix: "[variables('subnetPrefix')]"
- - type: "Microsoft.Network/networkInterfaces"
- apiVersion: "2015-05-01-preview"
- name: "[variables('nicName')]"
- location: "[variables('location')]"
- dependsOn:
- - "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]"
- - "[concat('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]"
- properties:
- ipConfigurations:
- -
- name: "ipconfig1"
- properties:
- privateIPAllocationMethod: "Dynamic"
- publicIPAddress:
- id: "[resourceId('Microsoft.Network/publicIPAddresses',variables('publicIPAddressName'))]"
- subnet:
- id: "[variables('subnetRef')]"
- - type: "Microsoft.Compute/virtualMachines"
- apiVersion: "2015-06-15"
- name: "[variables('vmName')]"
- location: "[variables('location')]"
- dependsOn:
- - "[concat('Microsoft.Storage/storageAccounts/', parameters('newStorageAccountName'))]"
- - "[concat('Microsoft.Network/networkInterfaces/', variables('nicName'))]"
- properties:
- hardwareProfile:
- vmSize: "[variables('vmSize')]"
- osProfile:
- computername: "[variables('vmName')]"
- adminUsername: "[parameters('adminUsername')]"
- adminPassword: "[parameters('adminPassword')]"
- storageProfile:
- imageReference:
- publisher: "[variables('imagePublisher')]"
- offer: "[variables('imageOffer')]"
- sku: "[parameters('ubuntuOSVersion')]"
- version: "latest"
- osDisk:
- name: "osdisk"
- vhd:
- uri: >
- [concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net/',variables('vmStorageAccountContainerName'),'/',
- variables('OSDiskName'),'.vhd')]
- caching: "ReadWrite"
- createOption: "FromImage"
- networkProfile:
- networkInterfaces:
- -
- id: "[resourceId('Microsoft.Network/networkInterfaces',variables('nicName'))]"
- diagnosticsProfile:
- bootDiagnostics:
- enabled: "true"
- storageUri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net')]"
- parameters:
- newStorageAccountName:
- value: devopsclestorage
- adminUsername:
- value: devopscle
- adminPassword:
- value: Password1!
- dnsNameForPublicIP:
- value: devopscleazure
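-
-# Illustrative sketch, not from the original module documentation: run a deployment in
-# complete mode without blocking until it finishes; the template and parameter URIs are
-# the same quick-start links used above.
-- name: Create Azure Deploy in complete mode without waiting
-  azure_rm_deployment:
-    resource_group: myResourceGroup
-    name: myDeployment
-    deployment_mode: complete
-    wait_for_deployment_completion: no
-    template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.json'
-    parameters_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.parameters.json'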
-'''
-
-RETURN = '''
-deployment:
- description: Deployment details.
- type: complex
- returned: always
- contains:
- group_name:
- description:
- - Name of the resource group.
- type: str
- returned: always
- sample: myResourceGroup
- id:
- description:
- - The Azure ID of the deployment.
- type: str
- returned: always
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Resources/deployments/myDeployment"
- instances:
- description:
- - Provides the public IP addresses for each VM instance.
- type: list
- returned: always
- contains:
- ips:
- description:
- - List of Public IP addresses.
- type: list
- returned: always
- contains:
- dns_settings:
- description:
- - DNS Settings.
- type: complex
- returned: always
- contains:
- domain_name_label:
- description:
- - Domain Name Label.
- type: str
- returned: always
- sample: myvirtualmachine
- fqdn:
- description:
- - Fully Qualified Domain Name.
- type: str
- returned: always
- sample: myvirtualmachine.eastus2.cloudapp.azure.com
- id:
- description:
- - Public IP resource id.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/p
- ublicIPAddresses/myPublicIP"
- name:
- description:
- - Public IP resource name.
- returned: always
- type: str
- sample: myPublicIP
- public_ip:
- description:
- - Public IP address value.
- returned: always
- type: str
- sample: 104.209.244.123
- public_ip_allocation_method:
- description:
- - Public IP allocation method.
- returned: always
- type: str
- sample: Dynamic
- vm_name:
- description:
- - Virtual machine name.
- returned: always
- type: str
- sample: myvirtualmachine
- name:
- description:
- - Name of the deployment.
- type: str
- returned: always
- sample: myDeployment
- outputs:
- description:
- - Dictionary of outputs received from the deployment.
- type: complex
- returned: always
- sample: { "hostname": { "type": "String", "value": "myvirtualmachine.eastus2.cloudapp.azure.com" } }
-'''
-
-import time
-
-try:
- from azure.common.credentials import ServicePrincipalCredentials
- import time
- import yaml
-except ImportError as exc:
- IMPORT_ERROR = "Error importing module prerequisites: %s" % exc
-
-try:
- from itertools import chain
- from azure.common.exceptions import CloudError
- from azure.mgmt.resource.resources import ResourceManagementClient
- from azure.mgmt.network import NetworkManagementClient
-
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-
-class AzureRMDeploymentManager(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True, aliases=['resource_group_name']),
- name=dict(type='str', default="ansible-arm", aliases=['deployment_name']),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- template=dict(type='dict', default=None),
- parameters=dict(type='dict', default=None),
- template_link=dict(type='str', default=None),
- parameters_link=dict(type='str', default=None),
- location=dict(type='str', default="westus"),
- deployment_mode=dict(type='str', default='incremental', choices=['complete', 'incremental']),
- wait_for_deployment_completion=dict(type='bool', default=True),
- wait_for_deployment_polling_period=dict(type='int', default=10)
- )
-
- mutually_exclusive = [('template', 'template_link'),
- ('parameters', 'parameters_link')]
-
- self.resource_group = None
- self.state = None
- self.template = None
- self.parameters = None
- self.template_link = None
- self.parameters_link = None
- self.location = None
- self.deployment_mode = None
- self.name = None
- self.wait_for_deployment_completion = None
- self.wait_for_deployment_polling_period = None
- self.tags = None
- self.append_tags = None
-
- self.results = dict(
- deployment=dict(),
- changed=False,
- msg=""
- )
-
- super(AzureRMDeploymentManager, self).__init__(derived_arg_spec=self.module_arg_spec,
- mutually_exclusive=mutually_exclusive,
- supports_check_mode=False)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['append_tags', 'tags']:
- setattr(self, key, kwargs[key])
-
- if self.state == 'present':
- deployment = self.deploy_template()
- if deployment is None:
- self.results['deployment'] = dict(
- name=self.name,
- group_name=self.resource_group,
- id=None,
- outputs=None,
- instances=None
- )
- else:
- self.results['deployment'] = dict(
- name=deployment.name,
- group_name=self.resource_group,
- id=deployment.id,
- outputs=deployment.properties.outputs,
- instances=self._get_instances(deployment)
- )
-
- self.results['changed'] = True
- self.results['msg'] = 'deployment succeeded'
- else:
- try:
- if self.get_resource_group(self.resource_group):
- self.destroy_resource_group()
- self.results['changed'] = True
- self.results['msg'] = "deployment deleted"
- except CloudError:
- # resource group does not exist
- pass
-
- return self.results
-
- def deploy_template(self):
- """
- Deploy the targeted template and parameters
- :param module: Ansible module containing the validated configuration for the deployment template
- :param client: resource management client for azure
- :param conn_info: connection info needed
- :return:
- """
-
- deploy_parameter = self.rm_models.DeploymentProperties(mode=self.deployment_mode)
- if not self.parameters_link:
- deploy_parameter.parameters = self.parameters
- else:
- deploy_parameter.parameters_link = self.rm_models.ParametersLink(
- uri=self.parameters_link
- )
- if not self.template_link:
- deploy_parameter.template = self.template
- else:
- deploy_parameter.template_link = self.rm_models.TemplateLink(
- uri=self.template_link
- )
-
- if self.append_tags and self.tags:
- try:
- # fetch the RG directly (instead of using the base helper) since we don't want to exit if it's missing
- rg = self.rm_client.resource_groups.get(self.resource_group)
- if rg.tags:
- self.tags = dict(self.tags, **rg.tags)
- except CloudError:
- # resource group does not exist
- pass
-
- params = self.rm_models.ResourceGroup(location=self.location, tags=self.tags)
-
- try:
- self.rm_client.resource_groups.create_or_update(self.resource_group, params)
- except CloudError as exc:
- self.fail("Resource group create_or_update failed with status code: %s and message: %s" %
- (exc.status_code, exc.message))
- try:
- result = self.rm_client.deployments.create_or_update(self.resource_group,
- self.name,
- deploy_parameter)
-
- deployment_result = None
- if self.wait_for_deployment_completion:
- deployment_result = self.get_poller_result(result)
- while deployment_result.properties is None or deployment_result.properties.provisioning_state not in ['Canceled', 'Failed', 'Deleted',
- 'Succeeded']:
- time.sleep(self.wait_for_deployment_polling_period)
- deployment_result = self.rm_client.deployments.get(self.resource_group, self.name)
- except CloudError as exc:
- failed_deployment_operations = self._get_failed_deployment_operations(self.name)
- self.log("Deployment failed %s: %s" % (exc.status_code, exc.message))
- self.fail("Deployment failed with status code: %s and message: %s" % (exc.status_code, exc.message),
- failed_deployment_operations=failed_deployment_operations)
-
- if self.wait_for_deployment_completion and deployment_result.properties.provisioning_state != 'Succeeded':
- self.log("provisioning state: %s" % deployment_result.properties.provisioning_state)
- failed_deployment_operations = self._get_failed_deployment_operations(self.name)
- self.fail('Deployment failed. Deployment id: %s' % deployment_result.id,
- failed_deployment_operations=failed_deployment_operations)
-
- return deployment_result
-
- def destroy_resource_group(self):
- """
- Destroy the targeted resource group
- """
- try:
- result = self.rm_client.resource_groups.delete(self.resource_group)
- result.wait() # Blocking wait till the delete is finished
- except CloudError as e:
- if e.status_code == 404 or e.status_code == 204:
- return
- else:
- self.fail("Delete resource group and deploy failed with status code: %s and message: %s" %
- (e.status_code, e.message))
-
- def _get_failed_nested_operations(self, current_operations):
- new_operations = []
- for operation in current_operations:
- if operation.properties.provisioning_state == 'Failed':
- new_operations.append(operation)
- if operation.properties.target_resource and \
- 'Microsoft.Resources/deployments' in operation.properties.target_resource.id:
- nested_deployment = operation.properties.target_resource.resource_name
- try:
- nested_operations = self.rm_client.deployment_operations.list(self.resource_group,
- nested_deployment)
- except CloudError as exc:
- self.fail("List nested deployment operations failed with status code: %s and message: %s" %
- (exc.status_code, exc.message))
- new_nested_operations = self._get_failed_nested_operations(nested_operations)
- new_operations += new_nested_operations
- return new_operations
-
- def _get_failed_deployment_operations(self, name):
- results = []
- # time.sleep(15) # there is a race condition between when we ask for deployment status and when the
- # # status is available.
-
- try:
- operations = self.rm_client.deployment_operations.list(self.resource_group, name)
- except CloudError as exc:
- self.fail("Get deployment failed with status code: %s and message: %s" %
- (exc.status_code, exc.message))
- try:
- results = [
- dict(
- id=op.id,
- operation_id=op.operation_id,
- status_code=op.properties.status_code,
- status_message=op.properties.status_message,
- target_resource=dict(
- id=op.properties.target_resource.id,
- resource_name=op.properties.target_resource.resource_name,
- resource_type=op.properties.target_resource.resource_type
- ) if op.properties.target_resource else None,
- provisioning_state=op.properties.provisioning_state,
- )
- for op in self._get_failed_nested_operations(operations)
- ]
- except Exception:
- # If we fail here, the original error gets lost and user receives wrong error message/stacktrace
- pass
- self.log(dict(failed_deployment_operations=results), pretty_print=True)
- return results
-
- def _get_instances(self, deployment):
- dep_tree = self._build_hierarchy(deployment.properties.dependencies)
- vms = self._get_dependencies(dep_tree, resource_type="Microsoft.Compute/virtualMachines")
- vms_and_nics = [(vm, self._get_dependencies(vm['children'], "Microsoft.Network/networkInterfaces"))
- for vm in vms]
- vms_and_ips = [(vm['dep'], self._nic_to_public_ips_instance(nics))
- for vm, nics in vms_and_nics]
- return [dict(vm_name=vm.resource_name, ips=[self._get_ip_dict(ip)
- for ip in ips]) for vm, ips in vms_and_ips if len(ips) > 0]
-
- def _get_dependencies(self, dep_tree, resource_type):
- matches = [value for value in dep_tree.values() if value['dep'].resource_type == resource_type]
- for child_tree in [value['children'] for value in dep_tree.values()]:
- matches += self._get_dependencies(child_tree, resource_type)
- return matches
-
- def _build_hierarchy(self, dependencies, tree=None):
- tree = dict(top=True) if tree is None else tree
- for dep in dependencies:
- if dep.resource_name not in tree:
- tree[dep.resource_name] = dict(dep=dep, children=dict())
- if isinstance(dep, self.rm_models.Dependency) and dep.depends_on is not None and len(dep.depends_on) > 0:
- self._build_hierarchy(dep.depends_on, tree[dep.resource_name]['children'])
-
- if 'top' in tree:
- tree.pop('top', None)
- keys = list(tree.keys())
- for key1 in keys:
- for key2 in keys:
- if key2 in tree and key1 in tree[key2]['children'] and key1 in tree:
- tree[key2]['children'][key1] = tree[key1]
- tree.pop(key1)
- return tree
-
- def _get_ip_dict(self, ip):
- ip_dict = dict(name=ip.name,
- id=ip.id,
- public_ip=ip.ip_address,
- public_ip_allocation_method=str(ip.public_ip_allocation_method)
- )
- if ip.dns_settings:
- ip_dict['dns_settings'] = {
- 'domain_name_label': ip.dns_settings.domain_name_label,
- 'fqdn': ip.dns_settings.fqdn
- }
- return ip_dict
-
- def _nic_to_public_ips_instance(self, nics):
- return [self.network_client.public_ip_addresses.get(public_ip_id.split('/')[4], public_ip_id.split('/')[-1])
- for nic_obj in (self.network_client.network_interfaces.get(self.resource_group,
- nic['dep'].resource_name) for nic in nics)
- for public_ip_id in [ip_conf_instance.public_ip_address.id
- for ip_conf_instance in nic_obj.ip_configurations
- if ip_conf_instance.public_ip_address]]
-
-
-def main():
- AzureRMDeploymentManager()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_deployment_info.py b/lib/ansible/modules/cloud/azure/azure_rm_deployment_info.py
deleted file mode 100644
index 4c133e6dbe..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_deployment_info.py
+++ /dev/null
@@ -1,249 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_deployment_info
-version_added: "2.9"
-short_description: Get Azure Deployment facts
-description:
- - Get facts of Azure Deployment.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- name:
- description:
- - The name of the deployment.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of Deployment
- azure_rm_deployment_info:
- resource_group: myResourceGroup
- name: myDeployment
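-
-  # Illustrative sketch, not from the original module documentation: omitting I(name)
-  # lists all deployments in the resource group.
-  - name: List deployments in a resource group
-    azure_rm_deployment_info:
-      resource_group: myResourceGroup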
-'''
-
-RETURN = '''
-deployments:
- description:
- - A list of dictionaries containing facts for deployments.
- returned: always
- type: complex
- contains:
- id:
- description:
- - The identifier of the resource.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Resources/deployments/myDeployment"
- resource_group:
- description:
- - Resource group name.
- returned: always
- sample: myResourceGroup
- name:
- description:
- - Deployment name.
- returned: always
- sample: myDeployment
- provisioning_state:
- description:
- - Provisioning state of the deployment.
- returned: always
- sample: Succeeded
- template_link:
- description:
- - Link to the template.
- returned: always
- sample: "https://raw.githubusercontent.com/Azure/azure-quickstart-templates/d01a5c06f4f1bc03a049ca17bbbd6e06d62657b3/101-vm-simple-linux/
- azuredeploy.json"
- parameters:
- description:
- - Dictionary containing deployment parameters.
- returned: always
- type: complex
- outputs:
- description:
- - Dictionary containing deployment outputs.
- returned: always
- output_resources:
- description:
- - List of resources.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource id.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkI
- nterfaces/myNetworkInterface"
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: myNetworkInterface
- type:
- description:
- - Resource type.
- returned: always
- type: str
- sample: Microsoft.Network/networkInterfaces
- depends_on:
- description:
- - List of resource ids.
- type: list
- returned: always
- sample:
- - "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGropup/providers/Microsoft.Network/virtualNet
- works/myVirtualNetwork"
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMDeploymentInfo(AzureRMModuleBase):
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
- self.results = dict(
- changed=False
- )
- self.resource_group = None
- self.name = None
-
- super(AzureRMDeploymentInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_deployment_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_deployment_facts' module has been renamed to 'azure_rm_deployment_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name:
- self.results['deployments'] = self.get()
- else:
- self.results['deployments'] = self.list()
-
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.rm_client.deployments.get(self.resource_group, deployment_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Deployment.')
-
- if response:
- results.append(self.format_response(response))
-
- return results
-
- def list(self):
- response = None
- results = []
- try:
- response = self.rm_client.deployments.list(self.resource_group)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Deployment.')
-
- if response is not None:
- for item in response:
- results.append(self.format_response(item))
-
- return results
-
- def format_response(self, item):
- d = item.as_dict()
- output_resources = {}
- for dependency in (d.get('properties', {}).get('dependencies') or []):
- # go through dependent resources
- depends_on = []
- for depends_on_resource in dependency['depends_on']:
- depends_on.append(depends_on_resource['id'])
- # append if not in list
- if not output_resources.get(depends_on_resource['id']):
- sub_resource = {
- 'id': depends_on_resource['id'],
- 'name': depends_on_resource['resource_name'],
- 'type': depends_on_resource['resource_type'],
- 'depends_on': []
- }
- output_resources[depends_on_resource['id']] = sub_resource
- resource = {
- 'id': dependency['id'],
- 'name': dependency['resource_name'],
- 'type': dependency['resource_type'],
- 'depends_on': depends_on
- }
- output_resources[dependency['id']] = resource
-
- # convert dictionary to list
- output_resources_list = []
- for r in output_resources:
- output_resources_list.append(output_resources[r])
-
- d = {
- 'id': d.get('id'),
- 'resource_group': self.resource_group,
- 'name': d.get('name'),
- 'provisioning_state': d.get('properties', {}).get('provisioning_state'),
- 'parameters': d.get('properties', {}).get('parameters'),
- 'outputs': d.get('properties', {}).get('outputs'),
- 'output_resources': output_resources_list,
- 'template_link': (d.get('properties', {}).get('template_link') or {}).get('uri')
- }
- return d
-
-
-def main():
- AzureRMDeploymentInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlab.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlab.py
deleted file mode 100644
index 8881fe719f..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlab.py
+++ /dev/null
@@ -1,284 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlab
-version_added: "2.8"
-short_description: Manage Azure DevTest Lab instance
-description:
- - Create, update and delete instance of Azure DevTest Lab.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- name:
- description:
- - The name of the lab.
- required: True
- location:
- description:
- - The location of the resource.
- storage_type:
- description:
- - Type of storage used by the lab. It can be either C(premium) or C(standard).
- choices:
- - 'standard'
- - 'premium'
- premium_data_disks:
- description:
- - Allow creation of premium data disks.
- type: bool
- state:
- description:
- - Assert the state of the DevTest Lab.
- - Use C(present) to create or update a DevTest Lab and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Create (or update) DevTest Lab
- azure_rm_devtestlab:
- resource_group: myResourceGroup
- name: mylab
- storage_type: standard
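-
-  # Illustrative sketch, not from the original module documentation: premium data disks
-  # can be enabled at creation time, and the lab is removed with state=absent.
-  - name: Create DevTest Lab with premium data disks
-    azure_rm_devtestlab:
-      resource_group: myResourceGroup
-      name: mylab
-      storage_type: premium
-      premium_data_disks: true
-
-  - name: Delete DevTest Lab
-    azure_rm_devtestlab:
-      resource_group: myResourceGroup
-      name: mylab
-      state: absent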
-'''
-
-RETURN = '''
-id:
- description:
- - The identifier of the resource.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/mylab
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _snake_to_camel
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrestazure.azure_operation import AzureOperationPoller
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMDevTestLab(AzureRMModuleBase):
- """Configuration class for an Azure RM DevTest Lab resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- storage_type=dict(
- type='str',
- choices=['standard',
- 'premium']
- ),
- premium_data_disks=dict(
- type='bool'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.name = None
- self.lab = {}
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMDevTestLab, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- self.lab[key] = kwargs[key]
-
- if self.lab.get('storage_type'):
- self.lab['lab_storage_type'] = _snake_to_camel(self.lab['storage_type'], True)
- self.lab.pop('storage_type', None)
- if self.lab.get('premium_data_disks') is not None:
- self.lab['premium_data_disks'] = 'Enabled' if self.lab['premium_data_disks'] else 'Disabled'
-
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2018-10-15')
-
- resource_group = self.get_resource_group(self.resource_group)
- if self.lab.get('location') is None:
- self.lab['location'] = resource_group.location
-
- old_response = self.get_devtestlab()
-
- if not old_response:
- self.log("DevTest Lab instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("DevTest Lab instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- if self.lab.get('lab_storage_type') is not None and \
- self.lab.get('lab_storage_type').lower() != old_response.get('lab_storage_type', '').lower():
- self.to_do = Actions.Update
- if (self.lab.get('premium_data_disks') is not None and
- self.lab.get('premium_data_disks').lower() != old_response.get('premium_data_disks', '').lower()):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the DevTest Lab instance")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- response = self.create_update_devtestlab()
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("DevTest Lab instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_devtestlab()
- # This currently doesn't work as there is a bug in SDK / Service
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
- else:
- self.log("DevTest Lab instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if self.state == 'present':
- self.results.update({
- 'id': response.get('id', None)
- })
- return self.results
-
- def create_update_devtestlab(self):
- '''
- Creates or updates DevTest Lab with the specified configuration.
-
- :return: deserialized DevTest Lab instance state dictionary
- '''
- self.log("Creating / Updating the DevTest Lab instance {0}".format(self.name))
-
- try:
- response = self.mgmt_client.labs.create_or_update(resource_group_name=self.resource_group,
- name=self.name,
- lab=self.lab)
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the DevTest Lab instance.')
- self.fail("Error creating the DevTest Lab instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_devtestlab(self):
- '''
- Deletes specified DevTest Lab instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the DevTest Lab instance {0}".format(self.name))
- try:
- response = self.mgmt_client.labs.delete(resource_group_name=self.resource_group,
- name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the DevTest Lab instance.')
- self.fail("Error deleting the DevTest Lab instance: {0}".format(str(e)))
-
- return True
-
- def get_devtestlab(self):
- '''
- Gets the properties of the specified DevTest Lab.
-
- :return: deserialized DevTest Lab instance state dictionary
- '''
- self.log("Checking if the DevTest Lab instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mgmt_client.labs.get(resource_group_name=self.resource_group,
- name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("DevTest Lab instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the DevTest Lab instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMDevTestLab()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlab_info.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlab_info.py
deleted file mode 100644
index 24c52df295..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlab_info.py
+++ /dev/null
@@ -1,272 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlab_info
-version_added: "2.9"
-short_description: Get Azure DevTest Lab facts
-description:
- - Get facts of Azure DevTest Lab.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- type: str
- name:
- description:
- - The name of the lab.
- type: str
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- type: list
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-'''
-
-EXAMPLES = '''
- - name: List instances of DevTest Lab by resource group
- azure_rm_devtestlab_info:
- resource_group: testrg
-
- - name: List instances of DevTest Lab in subscription
- azure_rm_devtestlab_info:
-
- - name: Get instance of DevTest Lab
- azure_rm_devtestlab_info:
- resource_group: testrg
- name: testlab
-'''
-
-RETURN = '''
-labs:
- description:
- - A list of dictionaries containing facts for Lab.
- returned: always
- type: complex
- contains:
- id:
- description:
- - The identifier of the resource.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab
- resource_group:
- description:
- - The name of the resource group.
- returned: always
- type: str
- sample: testrg
- name:
- description:
- - The name of the resource.
- returned: always
- type: str
- sample: testlab
- location:
- description:
- - The location of the resource.
- returned: always
- type: str
- sample: eastus
- storage_type:
- description:
- - Lab storage type.
- returned: always
- type: str
- sample: standard
- premium_data_disks:
- description:
- - Whether premium data disks are allowed.
- returned: always
- type: bool
- sample: false
- provisioning_state:
- description:
- - Lab provisioning state.
- returned: always
- type: str
- sample: Succeeded
- artifacts_storage_account:
- description:
- - Artifacts storage account ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/myLab6346
- default_premium_storage_account:
- description:
- - Default premium storage account ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/myLab6346
- default_storage_account:
- description:
- - Default storage account ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/myLab6346
- premium_data_disk_storage_account:
- description:
- - Premium data disk storage account ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/myLab6346
- vault_name:
- description:
- - Key vault ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.KeyVault/vaults/myLab6788
- tags:
- description:
- - The tags of the resource.
- returned: always
- type: complex
- sample: "{ 'MyTag': 'MyValue' }"
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMDevTestLabInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str'
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.name = None
- self.tags = None
- super(AzureRMDevTestLabInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_devtestlab_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_devtestlab_facts' module has been renamed to 'azure_rm_devtestlab_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
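- # Scope the query: a single lab when both resource_group and name are given, otherwise all labs in the resource group or in the subscription.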
- if self.resource_group is not None:
- if self.name is not None:
- self.results['labs'] = self.get()
- else:
- self.results['labs'] = self.list_by_resource_group()
- else:
- self.results['labs'] = self.list_by_subscription()
- return self.results
-
- def list_by_resource_group(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.labs.list_by_resource_group(resource_group_name=self.resource_group)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Lab.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_response(item))
-
- return results
-
- def list_by_subscription(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.labs.list_by_subscription()
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Lab.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_response(item))
-
- return results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.labs.get(resource_group_name=self.resource_group,
- name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Lab.')
-
- if response and self.has_tags(response.tags, self.tags):
- results.append(self.format_response(response))
-
- return results
-
- def format_response(self, item):
- d = item.as_dict()
- d = {
- 'id': d.get('id', None),
- 'resource_group': self.resource_group,
- 'name': d.get('name', None),
- 'location': d.get('location', '').replace(' ', '').lower(),
- 'storage_type': d.get('lab_storage_type', '').lower(),
- 'premium_data_disks': d.get('premium_data_disks') == 'Enabled',
- 'provisioning_state': d.get('provisioning_state'),
- 'artifacts_storage_account': d.get('artifacts_storage_account'),
- 'default_premium_storage_account': d.get('default_premium_storage_account'),
- 'default_storage_account': d.get('default_storage_account'),
- 'premium_data_disk_storage_account': d.get('premium_data_disk_storage_account'),
- 'vault_name': d.get('vault_name'),
- 'tags': d.get('tags', None)
- }
- return d
-
-
-def main():
- AzureRMDevTestLabInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabarmtemplate_info.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlabarmtemplate_info.py
deleted file mode 100644
index d3189fe2f3..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabarmtemplate_info.py
+++ /dev/null
@@ -1,226 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlabarmtemplate_info
-version_added: "2.9"
-short_description: Get Azure DevTest Lab ARM Template facts
-description:
- - Get facts of Azure DevTest Lab ARM Template.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- type: str
- lab_name:
- description:
- - The name of the lab.
- required: True
- type: str
- artifact_source_name:
- description:
- - The name of the artifact source.
- required: True
- type: str
- name:
- description:
- - The name of the ARM template.
- type: str
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get information on DevTest Lab ARM Template
- azure_rm_devtestlabarmtemplate_info:
- resource_group: myResourceGroup
- lab_name: myLab
- artifact_source_name: public environment repo
- name: WebApp
-'''
-
-RETURN = '''
-arm_templates:
- description:
- - A list of dictionaries containing facts for DevTest Lab ARM Template.
- returned: always
- type: complex
- contains:
- id:
- description:
- - The identifier of the resource.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/art
- ifactSources/public environment repo/armTemplates/WebApp"
- resource_group:
- description:
- - Resource group name.
- returned: always
- sample: myResourceGroup
- lab_name:
- description:
- - DevTest Lab name.
- returned: always
- sample: myLab
- artifact_source_name:
- description:
- - Artifact source name.
- returned: always
- sample: public environment repo
- name:
- description:
- - ARM Template name.
- returned: always
- sample: WebApp
- display_name:
- description:
- - Display name of the ARM template.
- returned: always
- sample: Web App
- description:
- description:
- - Description of the ARM template.
- returned: always
- sample: This template creates an Azure Web App without a data store.
- publisher:
- description:
- - Publisher of the ARM template.
- returned: always
- sample: Microsoft
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMDtlArmTemplateInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- lab_name=dict(
- type='str',
- required=True
- ),
- artifact_source_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.lab_name = None
- self.artifact_source_name = None
- self.name = None
- super(AzureRMDtlArmTemplateInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_devtestlabarmtemplate_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_devtestlabarmtemplate_facts' module has been renamed to 'azure_rm_devtestlabarmtemplate_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if self.name:
- self.results['armtemplates'] = self.get()
- else:
- self.results['armtemplates'] = self.list()
-
- return self.results
-
- def list(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.arm_templates.list(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- artifact_source_name=self.artifact_source_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail('Could not get facts for DTL ARM Template.')
-
- if response is not None:
- for item in response:
- results.append(self.format_response(item))
-
- return results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.arm_templates.get(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- artifact_source_name=self.artifact_source_name,
- name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail('Could not get facts for DTL ARM Template.')
-
- if response:
- results.append(self.format_response(response))
-
- return results
-
- def format_response(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'),
- 'lab_name': self.parse_resource_to_dict(d.get('id')).get('name'),
- 'artifact_source_name': self.parse_resource_to_dict(d.get('id')).get('child_name_1'),
- 'id': d.get('id', None),
- 'name': d.get('name'),
- 'display_name': d.get('display_name'),
- 'description': d.get('description'),
- 'publisher': d.get('publisher')
- }
- return d
-
-
-def main():
- AzureRMDtlArmTemplateInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifact_info.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifact_info.py
deleted file mode 100644
index 612e143bd0..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifact_info.py
+++ /dev/null
@@ -1,250 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlabartifact_info
-version_added: "2.9"
-short_description: Get Azure DevTest Lab Artifact facts
-description:
- - Get facts of Azure DevTest Lab Artifact.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- type: str
- lab_name:
- description:
- - The name of the lab.
- required: True
- type: str
- artifact_source_name:
- description:
- - The name of the artifact source.
- required: True
- type: str
- name:
- description:
- - The name of the artifact.
- type: str
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of DevTest Lab Artifact
- azure_rm_devtestlabartifact_info:
- resource_group: myResourceGroup
- lab_name: myLab
- artifact_source_name: myArtifactSource
- name: myArtifact
-'''
-
-RETURN = '''
-artifacts:
- description:
- - A list of dictionaries containing facts for DevTest Lab Artifact.
- returned: always
- type: complex
- contains:
- id:
- description:
- - The identifier of the artifact.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/ar
- tifactSources/myArtifactSource/artifacts/myArtifact"
- resource_group:
- description:
- - Name of the resource group.
- returned: always
- type: str
- sample: myResourceGroup
- lab_name:
- description:
- - Name of the lab.
- returned: always
- type: str
- sample: myLab
- artifact_source_name:
- description:
- - The name of the artifact source.
- returned: always
- type: str
- sample: myArtifactSource
- name:
- description:
- - The name of the artifact.
- returned: always
- type: str
- sample: myArtifact
- description:
- description:
- - Description of the artifact.
- returned: always
- type: str
- sample: Installs My Software
- file_path:
- description:
- - Artifact's path in the repo.
- returned: always
- type: str
- sample: Artifacts/myArtifact
- publisher:
- description:
- - Publisher name.
- returned: always
- type: str
- sample: MyPublisher
- target_os_type:
- description:
- - Target OS type.
- returned: always
- type: str
- sample: Linux
- title:
- description:
- - Title of the artifact.
- returned: always
- type: str
- sample: My Software
- parameters:
- description:
- - A dictionary containing parameters definition of the artifact.
- returned: always
- type: complex
- sample: {}
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMArtifactInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- lab_name=dict(
- type='str',
- required=True
- ),
- artifact_source_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.lab_name = None
- self.artifact_source_name = None
- self.name = None
- super(AzureRMArtifactInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if self.name:
- self.results['artifacts'] = self.get()
- else:
- self.results['artifacts'] = self.list()
-
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.artifacts.get(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- artifact_source_name=self.artifact_source_name,
- name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Artifact.')
-
- if response:
- results.append(self.format_response(response))
-
- return results
-
- def list(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.artifacts.list(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- artifact_source_name=self.artifact_source_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Artifact.')
-
- if response is not None:
- for item in response:
- results.append(self.format_response(item))
-
- return results
-
- def format_response(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'),
- 'lab_name': self.parse_resource_to_dict(d.get('id')).get('name'),
- 'artifact_source_name': self.parse_resource_to_dict(d.get('id')).get('child_name_1'),
- 'id': d.get('id'),
- 'description': d.get('description'),
- 'file_path': d.get('file_path'),
- 'name': d.get('name'),
- 'parameters': d.get('parameters'),
- 'publisher': d.get('publisher'),
- 'target_os_type': d.get('target_os_type'),
- 'title': d.get('title')
- }
- return d
-
-
-def main():
- AzureRMArtifactInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifactsource.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifactsource.py
deleted file mode 100644
index 99eb44bc31..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifactsource.py
+++ /dev/null
@@ -1,365 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlabartifactsource
-version_added: "2.8"
-short_description: Manage Azure DevTest Labs Artifacts Source instance
-description:
- - Create, update and delete instance of Azure DevTest Labs Artifacts Source.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- lab_name:
- description:
- - The name of the lab.
- required: True
- name:
- description:
- - The name of the artifact source.
- required: True
- display_name:
- description:
- - The artifact source's display name.
- uri:
- description:
- - The artifact source's URI.
- source_type:
- description:
- - The artifact source's type.
- choices:
- - 'vso'
- - 'github'
- folder_path:
- description:
- - The folder containing artifacts.
- arm_template_folder_path:
- description:
- - The folder containing Azure Resource Manager templates.
- branch_ref:
- description:
- - The artifact source's branch reference.
- security_token:
- description:
- - The security token to authenticate to the artifact source.
- is_enabled:
- description:
- - Indicates whether the artifact source is enabled.
- type: bool
- state:
- description:
- - Assert the state of the DevTest Labs Artifacts Source.
- - Use C(present) to create or update a DevTest Labs Artifacts Source and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Create (or update) DevTest Labs Artifacts Source
- azure_rm_devtestlabartifactsource:
- resource_group: myrg
- lab_name: mylab
- name: myartifacts
- uri: https://github.com/myself/myrepo.git
- source_type: github
- folder_path: /
- security_token: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
-'''
-
-RETURN = '''
-id:
- description:
- - The identifier of the resource.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myrg/providers/microsoft.devtestlab/labs/mylab/artifactsources/myartifacts
-is_enabled:
- description:
- - Indicates whether the artifact source is enabled.
- returned: always
- type: bool
- sample: true
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _snake_to_camel
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrestazure.azure_operation import AzureOperationPoller
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMDevTestLabArtifactsSource(AzureRMModuleBase):
- """Configuration class for an Azure RM DevTest Labs Artifacts Source resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- lab_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- display_name=dict(
- type='str'
- ),
- uri=dict(
- type='str'
- ),
- source_type=dict(
- type='str',
- choices=['vso',
- 'github']
- ),
- folder_path=dict(
- type='str'
- ),
- arm_template_folder_path=dict(
- type='str'
- ),
- branch_ref=dict(
- type='str'
- ),
- security_token=dict(
- type='str'
- ),
- is_enabled=dict(
- type='bool'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.lab_name = None
- self.name = None
- self.artifact_source = dict()
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.to_do = Actions.NoAction
-
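- # When state is 'present', source_type, uri and security_token must all be supplied.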
- required_if = [
- ('state', 'present', [
- 'source_type', 'uri', 'security_token'])
- ]
-
- super(AzureRMDevTestLabArtifactsSource, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True,
- required_if=required_if)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- self.artifact_source[key] = kwargs[key]
-
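- # Map the module's source_type choices to the enum names used by the DevTest Labs API ('GitHub' / 'VsoGit').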
- if self.artifact_source.get('source_type') == 'github':
- self.artifact_source['source_type'] = 'GitHub'
- elif self.artifact_source.get('source_type') == 'vso':
- self.artifact_source['source_type'] = 'VsoGit'
-
- if self.artifact_source.get('is_enabled') is not None:
- self.artifact_source['status'] = 'Enabled' if self.artifact_source.pop('is_enabled') else 'Disabled'
-
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2018-10-15')
-
- old_response = self.get_devtestlabartifactssource()
-
- if not old_response:
- self.log("DevTest Labs Artifacts Source instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("DevTest Labs Artifacts Source instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.results['old_response'] = old_response
-
- if self.artifact_source.get('display_name') is not None:
- if self.artifact_source.get('display_name') != old_response.get('display_name'):
- self.to_do = Actions.Update
- else:
- self.artifact_source['display_name'] = old_response.get('display_name')
-
- if self.artifact_source.get('source_type').lower() != old_response.get('source_type').lower():
- self.to_do = Actions.Update
-
- if self.artifact_source.get('uri') != old_response.get('uri'):
- self.to_do = Actions.Update
-
- if self.artifact_source.get('branch_ref') is not None:
- if self.artifact_source.get('branch_ref') != old_response.get('branch_ref'):
- self.to_do = Actions.Update
- else:
- self.artifact_source['branch_ref'] = old_response.get('branch_ref')
-
- if self.artifact_source.get('status') is not None:
- if self.artifact_source.get('status') != old_response.get('status'):
- self.to_do = Actions.Update
- else:
- self.artifact_source['status'] = old_response.get('status')
-
- if self.artifact_source.get('folder_path') is not None:
- if self.artifact_source.get('folder_path') != old_response.get('folder_path'):
- self.to_do = Actions.Update
- else:
- self.artifact_source['folder_path'] = old_response.get('folder_path')
-
- if self.artifact_source.get('arm_template_folder_path') is not None:
- if self.artifact_source.get('arm_template_folder_path') != old_response.get('arm_template_folder_path'):
- self.to_do = Actions.Update
- else:
- self.artifact_source['arm_template_folder_path'] = old_response.get('arm_template_folder_path')
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the DevTest Labs Artifacts Source instance")
- self.results['changed'] = True
- if self.check_mode:
- return self.results
- response = self.create_update_devtestlabartifactssource()
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("DevTest Labs Artifacts Source instance deleted")
- self.results['changed'] = True
- if self.check_mode:
- return self.results
- self.delete_devtestlabartifactssource()
- else:
- self.log("DevTest Labs Artifacts Source instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if self.state == 'present':
- self.results.update({
- 'id': response.get('id', None),
- 'is_enabled': (response.get('status', None).lower() == 'enabled')
- })
- return self.results
-
- def create_update_devtestlabartifactssource(self):
- '''
- Creates or updates DevTest Labs Artifacts Source with the specified configuration.
-
- :return: deserialized DevTest Labs Artifacts Source instance state dictionary
- '''
- self.log("Creating / Updating the DevTest Labs Artifacts Source instance {0}".format(self.name))
-
- try:
- response = self.mgmt_client.artifact_sources.create_or_update(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name,
- artifact_source=self.artifact_source)
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the DevTest Labs Artifacts Source instance.')
- self.fail("Error creating the DevTest Labs Artifacts Source instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_devtestlabartifactssource(self):
- '''
- Deletes specified DevTest Labs Artifacts Source instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the DevTest Labs Artifacts Source instance {0}".format(self.name))
- try:
- response = self.mgmt_client.artifact_sources.delete(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the DevTest Labs Artifacts Source instance.')
- self.fail("Error deleting the DevTest Labs Artifacts Source instance: {0}".format(str(e)))
-
- return True
-
- def get_devtestlabartifactssource(self):
- '''
- Gets the properties of the specified DevTest Labs Artifacts Source.
-
- :return: deserialized DevTest Labs Artifacts Source instance state dictionary
- '''
- self.log("Checking if the DevTest Labs Artifacts Source instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mgmt_client.artifact_sources.get(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("DevTest Labs Artifacts Source instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the DevTest Labs Artifacts Source instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMDevTestLabArtifactsSource()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifactsource_info.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifactsource_info.py
deleted file mode 100644
index 8446b7d296..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifactsource_info.py
+++ /dev/null
@@ -1,258 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlabartifactsource_info
-version_added: "2.9"
-short_description: Get Azure DevTest Lab Artifact Source facts
-description:
- - Get facts of Azure DevTest Lab Artifact Source.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- type: str
- lab_name:
- description:
- - The name of DevTest Lab.
- required: True
- type: str
- name:
- description:
- - The name of DevTest Lab Artifact Source.
- type: str
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- type: list
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of DevTest Lab Artifact Source
- azure_rm_devtestlabartifactsource_info:
- resource_group: myResourceGroup
- lab_name: myLab
- name: myArtifactSource
-'''
-
-RETURN = '''
-artifactsources:
- description:
- - A list of dictionaries containing facts for DevTest Lab Artifact Source.
- returned: always
- type: complex
- contains:
- id:
- description:
- - The identifier of the artifact source.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/ar
- tifactSources/myArtifactSource"
- resource_group:
- description:
- - Name of the resource group.
- returned: always
- type: str
- sample: myResourceGroup
- lab_name:
- description:
- - Name of the lab.
- returned: always
- type: str
- sample: myLab
- name:
- description:
- - The name of the artifact source.
- returned: always
- type: str
- sample: myArtifactSource
- display_name:
- description:
- - The artifact source's display name.
- returned: always
- type: str
- sample: Public Artifact Repo
- source_type:
- description:
- - The artifact source's type.
- returned: always
- type: str
- sample: github
- is_enabled:
- description:
- - Whether the artifact source is enabled.
- returned: always
- type: bool
- sample: true
- uri:
- description:
- - URI of the artifact source.
- returned: always
- type: str
- sample: https://github.com/Azure/azure-devtestlab.git
- folder_path:
- description:
- - The folder containing artifacts.
- returned: always
- type: str
- sample: /Artifacts
- arm_template_folder_path:
- description:
- - The folder containing Azure Resource Manager templates.
- returned: always
- type: str
- sample: /Environments
- provisioning_state:
- description:
- - Provisioning state of artifact source.
- returned: always
- type: str
- sample: Succeeded
- tags:
- description:
- - The tags of the resource.
- returned: always
- type: complex
- sample: "{ 'MyTag': 'MyValue' }"
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMDtlArtifactSourceInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- lab_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.lab_name = None
- self.name = None
- self.tags = None
- super(AzureRMDtlArtifactSourceInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_devtestlabartifactsource_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_devtestlabartifactsource_facts' module has been renamed to 'azure_rm_devtestlabartifactsource_info'",
- version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if self.name:
- self.results['artifactsources'] = self.get()
- else:
- self.results['artifactsources'] = self.list()
-
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.artifact_sources.get(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail('Could not get facts for Artifact Source.')
-
- if response and self.has_tags(response.tags, self.tags):
- results.append(self.format_response(response))
-
- return results
-
- def list(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.artifact_sources.list(resource_group_name=self.resource_group,
- lab_name=self.lab_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail('Could not get facts for Artifact Source.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_response(item))
-
- return results
-
- def format_response(self, item):
- d = item.as_dict()
- d = {
- 'id': d.get('id'),
- 'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'),
- 'lab_name': self.parse_resource_to_dict(d.get('id')).get('name'),
- 'name': d.get('name'),
- 'display_name': d.get('display_name'),
- 'tags': d.get('tags'),
- 'source_type': d.get('source_type').lower(),
- 'is_enabled': d.get('status') == 'Enabled',
- 'uri': d.get('uri'),
- 'arm_template_folder_path': d.get('arm_template_folder_path'),
- 'folder_path': d.get('folder_path'),
- 'provisioning_state': d.get('provisioning_state')
- }
- return d
-
-
-def main():
- AzureRMDtlArtifactSourceInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabcustomimage.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlabcustomimage.py
deleted file mode 100644
index ca52ec4579..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabcustomimage.py
+++ /dev/null
@@ -1,383 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlabcustomimage
-version_added: "2.8"
-short_description: Manage Azure DevTest Lab Custom Image instance
-description:
- - Create, update and delete instance of Azure DevTest Lab Custom Image.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- lab_name:
- description:
- - The name of the lab.
- required: True
- name:
- description:
- - The name of the custom image.
- required: True
- source_vm:
- description:
- - Source DevTest Lab virtual machine name.
- windows_os_state:
- description:
- - The state of the Windows OS.
- choices:
- - 'non_sysprepped'
- - 'sysprep_requested'
- - 'sysprep_applied'
- linux_os_state:
- description:
- - The state of the Linux OS.
- choices:
- - 'non_deprovisioned'
- - 'deprovision_requested'
- - 'deprovision_applied'
- description:
- description:
- - The description of the custom image.
- author:
- description:
- - The author of the custom image.
- state:
- description:
- - Assert the state of the Custom Image.
- - Use C(present) to create or update a Custom Image and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
-- name: Create instance of DevTest Lab Image
- azure_rm_devtestlabcustomimage:
- resource_group: myResourceGroup
- lab_name: myLab
- name: myImage
- source_vm: myDevTestLabVm
- linux_os_state: non_deprovisioned
-'''
-
-RETURN = '''
-id:
- description:
- - The identifier of the resource.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/images/myImage"
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _snake_to_camel
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrestazure.azure_operation import AzureOperationPoller
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMDtlCustomImage(AzureRMModuleBase):
- """Configuration class for an Azure RM Custom Image resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- lab_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- source_vm=dict(
- type='str'
- ),
- windows_os_state=dict(
- type='str',
- choices=['non_sysprepped',
- 'sysprep_requested',
- 'sysprep_applied']
- ),
- linux_os_state=dict(
- type='str',
- choices=['non_deprovisioned',
- 'deprovision_requested',
- 'deprovision_applied']
- ),
- description=dict(
- type='str'
- ),
- author=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.lab_name = None
- self.name = None
- self.custom_image = dict()
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.to_do = Actions.NoAction
-
- required_if = [
- ('state', 'present', [
- 'source_vm'])
- ]
-
- super(AzureRMDtlCustomImage, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True,
- required_if=required_if)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- self.custom_image[key] = kwargs[key]
-
- if self.state == 'present':
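- # Expand the source VM name into a full DevTest Labs virtual machine resource ID and attach the requested Windows or Linux OS state.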
- windows_os_state = self.custom_image.pop('windows_os_state', False)
- linux_os_state = self.custom_image.pop('linux_os_state', False)
- source_vm_name = self.custom_image.pop('source_vm')
- temp = "/subscriptions/{0}/resourcegroups/{1}/providers/microsoft.devtestlab/labs/{2}/virtualmachines/{3}"
- self.custom_image['vm'] = {}
- self.custom_image['vm']['source_vm_id'] = temp.format(self.subscription_id, self.resource_group, self.lab_name, source_vm_name)
- if windows_os_state:
- self.custom_image['vm']['windows_os_info'] = {'windows_os_state': _snake_to_camel(windows_os_state, True)}
- elif linux_os_state:
- self.custom_image['vm']['linux_os_info'] = {'linux_os_state': _snake_to_camel(linux_os_state, True)}
- else:
- self.fail("Either 'linux_os_state' or 'linux_os_state' must be specified")
-
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- old_response = self.get_customimage()
-
- if not old_response:
- self.log("Custom Image instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("Custom Image instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- if (not default_compare(self.custom_image, old_response, '', self.results)):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the Custom Image instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_customimage()
-
- self.results['changed'] = True
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("Custom Image instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_customimage()
- # This currently doesn't work as there is a bug in SDK / Service
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
- else:
- self.log("Custom Image instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if self.state == 'present':
- self.results.update({
- 'id': response.get('id', None)
- })
- return self.results
-
- def create_update_customimage(self):
- '''
- Creates or updates Custom Image with the specified configuration.
-
- :return: deserialized Custom Image instance state dictionary
- '''
- self.log("Creating / Updating the Custom Image instance {0}".format(self.name))
-
- try:
- response = self.mgmt_client.custom_images.create_or_update(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name,
- custom_image=self.custom_image)
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the Custom Image instance.')
- self.fail("Error creating the Custom Image instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_customimage(self):
- '''
- Deletes specified Custom Image instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the Custom Image instance {0}".format(self.name))
- try:
- response = self.mgmt_client.custom_images.delete(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the Custom Image instance.')
- self.fail("Error deleting the Custom Image instance: {0}".format(str(e)))
-
- return True
-
- def get_customimage(self):
- '''
- Gets the properties of the specified Custom Image.
-
- :return: deserialized Custom Image instance state dictionary
- '''
- self.log("Checking if the Custom Image instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mgmt_client.custom_images.get(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("Custom Image instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the Custom Image instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def default_compare(new, old, path, result):
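- # Recursively compare the requested configuration (new) with the existing resource (old); record the first difference in result['compare'] and return False when an update is required.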
- if new is None:
- return True
- elif isinstance(new, dict):
- if not isinstance(old, dict):
- result['compare'] = 'changed [' + path + '] old dict is null'
- return False
- for k in new.keys():
- if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
- return False
- return True
- elif isinstance(new, list):
- if not isinstance(old, list) or len(new) != len(old):
- result['compare'] = 'changed [' + path + '] length is different or null'
- return False
- if isinstance(old[0], dict):
- key = None
- if 'id' in old[0] and 'id' in new[0]:
- key = 'id'
- elif 'name' in old[0] and 'name' in new[0]:
- key = 'name'
- else:
- key = list(old[0])[0]
- new = sorted(new, key=lambda x: x.get(key, None))
- old = sorted(old, key=lambda x: x.get(key, None))
- else:
- new = sorted(new)
- old = sorted(old)
- for i in range(len(new)):
- if not default_compare(new[i], old[i], path + '/*', result):
- return False
- return True
- else:
- if path == '/location':
- new = new.replace(' ', '').lower()
- old = old.replace(' ', '').lower()
- if new == old:
- return True
- else:
- result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
- return False
-
-
-def dict_camelize(d, path, camelize_first):
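- # Convert the snake_case value found at the given path inside a nested dict/list structure to CamelCase.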
- if isinstance(d, list):
- for i in range(len(d)):
- dict_camelize(d[i], path, camelize_first)
- elif isinstance(d, dict):
- if len(path) == 1:
- old_value = d.get(path[0], None)
- if old_value is not None:
- d[path[0]] = _snake_to_camel(old_value, camelize_first)
- else:
- sd = d.get(path[0], None)
- if sd is not None:
- dict_camelize(sd, path[1:], camelize_first)
-
-
-def main():
- """Main execution"""
- AzureRMDtlCustomImage()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabcustomimage_info.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlabcustomimage_info.py
deleted file mode 100644
index 8d964dce3c..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabcustomimage_info.py
+++ /dev/null
@@ -1,229 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlabcustomimage_info
-version_added: "2.9"
-short_description: Get Azure DevTest Lab Custom Image facts
-description:
- - Get facts of Azure DevTest Lab Custom Image.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- type: str
- lab_name:
- description:
- - The name of the lab.
- required: True
- type: str
- name:
- description:
- - The name of the custom image.
- type: str
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- type: list
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of Custom Image
- azure_rm_devtestlabcustomimage_info:
- resource_group: myResourceGroup
- lab_name: myLab
- name: myImage
-
- - name: List instances of Custom Image in the lab
- azure_rm_devtestlabcustomimage_info:
- resource_group: myResourceGroup
- lab_name: myLab
-'''
-
-RETURN = '''
-custom_images:
- description:
- - A list of dictionaries containing facts for Custom Image.
- returned: always
- type: complex
- contains:
- id:
- description:
- - The identifier of the artifact source.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/cu
- stomimages/myImage"
- resource_group:
- description:
- - Name of the resource group.
- returned: always
- type: str
- sample: myResourceGroup
- lab_name:
- description:
- - Name of the lab.
- returned: always
- type: str
- sample: myLab
- name:
- description:
- - The name of the image.
- returned: always
- type: str
- sample: myImage
- managed_snapshot_id:
- description:
- - Managed snapshot ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.compute/snapshots/myImage"
- source_vm_id:
- description:
- - Source VM id.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx//resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/v
- irtualmachines/myLabVm"
- tags:
- description:
- - The tags of the resource.
- returned: always
- type: complex
- sample: "{ 'MyTag':'MyValue' }"
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMDtlCustomImageInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- lab_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.lab_name = None
- self.name = None
- self.tags = None
- super(AzureRMDtlCustomImageInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_devtestlabcustomimage_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_devtestlabcustomimage_facts' module has been renamed to 'azure_rm_devtestlabcustomimage_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if self.name:
- self.results['custom_images'] = self.get()
- else:
- self.results['custom_images'] = self.list()
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.custom_images.get(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Custom Image.')
-
- if response and self.has_tags(response.tags, self.tags):
- results.append(self.format_response(response))
-
- return results
-
- def list(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.custom_images.list(resource_group_name=self.resource_group,
- lab_name=self.lab_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Custom Image.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_response(item))
-
- return results
-
- def format_response(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.resource_group,
- 'lab_name': self.lab_name,
- 'name': d.get('name'),
- 'id': d.get('id'),
- 'managed_snapshot_id': d.get('managed_snapshot_id'),
- 'source_vm_id': d.get('vm', {}).get('source_vm_id'),
- 'tags': d.get('tags')
- }
- return d
-
-
-def main():
- AzureRMDtlCustomImageInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabenvironment.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlabenvironment.py
deleted file mode 100644
index be47f354c3..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabenvironment.py
+++ /dev/null
@@ -1,379 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlabenvironment
-version_added: "2.8"
-short_description: Manage Azure DevTest Lab Environment instance
-description:
- - Create, update and delete instance of Azure DevTest Lab Environment.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- lab_name:
- description:
- - The name of the lab.
- required: True
- user_name:
- description:
- - The name of the user profile.
- required: True
- name:
- description:
- - The name of the environment.
- required: True
- location:
- description:
- - The location of the resource.
- deployment_template:
- description:
- - The Azure Resource Manager template's identifier.
- deployment_parameters:
- description:
- - The parameters of the Azure Resource Manager template.
- type: list
- suboptions:
- name:
- description:
- - The name of the template parameter.
- value:
- description:
- - The value of the template parameter.
- state:
- description:
- - Assert the state of the Environment.
- - Use C(present) to create or update an Environment and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
-- name: Create instance of DevTest Lab Environment from public environment repo
- azure_rm_devtestlabenvironment:
- resource_group: myResourceGroup
- lab_name: myLab
- user_name: user
- name: myEnvironment
- location: eastus
- deployment_template:
- artifact_source_name: public environment repo
- name: WebApp
-'''
-
-RETURN = '''
-id:
- description:
- - The identifier of the resource.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/environment
- s/myEnvironment"
-
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _snake_to_camel
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrestazure.azure_operation import AzureOperationPoller
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMDtlEnvironment(AzureRMModuleBase):
- """Configuration class for an Azure RM Environment resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- lab_name=dict(
- type='str',
- required=True
- ),
- user_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- deployment_template=dict(
- type='raw'
- ),
- deployment_parameters=dict(
- type='list',
- options=dict(
- name=dict(
- type='str'
- ),
- value=dict(
- type='str'
- )
- )
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.lab_name = None
- self.user_name = None
- self.name = None
- self.dtl_environment = dict()
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMDtlEnvironment, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- self.dtl_environment[key] = kwargs[key]
-
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- resource_group = self.get_resource_group(self.resource_group)
- deployment_template = self.dtl_environment.pop('deployment_template', None)
- if deployment_template:
- if isinstance(deployment_template, dict):
- if all(key in deployment_template for key in ('artifact_source_name', 'name')):
- tmp = '/subscriptions/{0}/resourcegroups/{1}/providers/microsoft.devtestlab/labs/{2}/artifactSources/{3}/armTemplates/{4}'
- deployment_template = tmp.format(self.subscription_id,
- self.resource_group,
- self.lab_name,
- deployment_template['artifact_source_name'],
- deployment_template['name'])
- if not isinstance(deployment_template, str):
- self.fail("parameter error: expecting deployment_template to contain [artifact_source, name]")
- self.dtl_environment['deployment_properties'] = {}
- self.dtl_environment['deployment_properties']['arm_template_id'] = deployment_template
- self.dtl_environment['deployment_properties']['parameters'] = self.dtl_environment.pop('deployment_parameters', None)
-
- old_response = self.get_environment()
-
- if not old_response:
- self.log("Environment instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("Environment instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- if (not default_compare(self.dtl_environment, old_response, '', self.results)):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the Environment instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_environment()
-
- self.results['changed'] = True
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("Environment instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_environment()
- # This currently doesn't work as there is a bug in SDK / Service
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
- else:
- self.log("Environment instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if self.state == 'present':
- self.results.update({
- 'id': response.get('id', None)
- })
- return self.results
-
- def create_update_environment(self):
- '''
- Creates or updates Environment with the specified configuration.
-
- :return: deserialized Environment instance state dictionary
- '''
- self.log("Creating / Updating the Environment instance {0}".format(self.name))
-
- try:
- if self.to_do == Actions.Create:
- response = self.mgmt_client.environments.create_or_update(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- user_name=self.user_name,
- name=self.name,
- dtl_environment=self.dtl_environment)
- else:
- response = self.mgmt_client.environments.update(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- user_name=self.user_name,
- name=self.name,
- dtl_environment=self.dtl_environment)
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the Environment instance.')
- self.fail("Error creating the Environment instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_environment(self):
- '''
- Deletes specified Environment instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the Environment instance {0}".format(self.name))
- try:
- response = self.mgmt_client.environments.delete(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- user_name=self.user_name,
- name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the Environment instance.')
- self.fail("Error deleting the Environment instance: {0}".format(str(e)))
-
- return True
-
- def get_environment(self):
- '''
- Gets the properties of the specified Environment.
-
- :return: deserialized Environment instance state dictionary
- '''
- self.log("Checking if the Environment instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mgmt_client.environments.get(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- user_name=self.user_name,
- name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("Environment instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the Environment instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def default_compare(new, old, path, result):
- if new is None:
- return True
- elif isinstance(new, dict):
- if not isinstance(old, dict):
- result['compare'] = 'changed [' + path + '] old dict is null'
- return False
- for k in new.keys():
- if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
- return False
- return True
- elif isinstance(new, list):
- if not isinstance(old, list) or len(new) != len(old):
- result['compare'] = 'changed [' + path + '] length is different or null'
- return False
- if isinstance(old[0], dict):
- key = None
- if 'id' in old[0] and 'id' in new[0]:
- key = 'id'
- elif 'name' in old[0] and 'name' in new[0]:
- key = 'name'
- else:
- key = list(old[0])[0]
- new = sorted(new, key=lambda x: x.get(key, None))
- old = sorted(old, key=lambda x: x.get(key, None))
- else:
- new = sorted(new)
- old = sorted(old)
- for i in range(len(new)):
- if not default_compare(new[i], old[i], path + '/*', result):
- return False
- return True
- else:
- if path == '/location':
- new = new.replace(' ', '').lower()
- old = old.replace(' ', '').lower()
- if new == old:
- return True
- else:
- result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
- return False
-
-
-def main():
- """Main execution"""
- AzureRMDtlEnvironment()
-
-
-if __name__ == '__main__':
- main()
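
The deployment_template handling in exec_module() above reduces to formatting an armTemplates resource id under the lab's artifact source. A minimal, stand-alone sketch of that formatting (all identifiers below are placeholders taken from the documentation examples):

def arm_template_id(subscription_id, resource_group, lab_name, artifact_source_name, template_name):
    # mirrors the template string used in exec_module() above
    tmp = ('/subscriptions/{0}/resourcegroups/{1}/providers/microsoft.devtestlab'
           '/labs/{2}/artifactSources/{3}/armTemplates/{4}')
    return tmp.format(subscription_id, resource_group, lab_name,
                      artifact_source_name, template_name)

print(arm_template_id('xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx', 'myResourceGroup',
                      'myLab', 'public environment repo', 'WebApp'))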
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabenvironment_info.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlabenvironment_info.py
deleted file mode 100644
index d3409c2395..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabenvironment_info.py
+++ /dev/null
@@ -1,245 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlabenvironment_info
-version_added: "2.9"
-short_description: Get Azure Environment facts
-description:
- - Get facts of Azure Environment.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- type: str
- lab_name:
- description:
- - The name of the lab.
- required: True
- type: str
- user_name:
- description:
- - The name of the user profile.
- required: True
- type: str
- name:
- description:
- - The name of the environment.
- type: str
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- type: list
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of Environment
- azure_rm_devtestlabenvironment_info:
- resource_group: myResourceGroup
- lab_name: myLab
- user_name: myUser
- name: myEnvironment
-'''
-
-RETURN = '''
-environments:
- description:
- - A list of dictionaries containing facts for Environment.
- returned: always
- type: complex
- contains:
- id:
- description:
- - The identifier of the environment.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/sc
- hedules/xxxxxxxx-xxxx-xxxx-xxxxx-xxxxxxxxxxxxx/environments/myEnvironment"
- resource_group:
- description:
- - Name of the resource group.
- returned: always
- type: str
- sample: myResourceGroup
- lab_name:
- description:
- - Name of the lab.
- returned: always
- type: str
- sample: myLab
- name:
- description:
- - The name of the environment.
- returned: always
- type: str
- sample: myEnvironment
- deployment_template:
- description:
- - The identifier of the Azure Resource Manager template used by the environment.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/mylab/art
- ifactSources/public environment repo/armTemplates/WebApp"
- resource_group_id:
- description:
- - Target resource group id.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myLab-myEnvironment-982571"
- state:
- description:
- - Deployment state.
- returned: always
- type: str
- sample: Succeeded
- tags:
- description:
- - The tags of the resource.
- returned: always
- type: complex
- sample: "{ 'MyTag': 'MyValue' }"
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMDtlEnvironmentInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- lab_name=dict(
- type='str',
- required=True
- ),
- user_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.lab_name = None
- self.user_name = None
- self.name = None
- self.tags = None
- super(AzureRMDtlEnvironmentInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_devtestlabenvironment_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_devtestlabenvironment_facts' module has been renamed to 'azure_rm_devtestlabenvironment_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if self.name:
- self.results['environments'] = self.get()
- else:
- self.results['environments'] = self.list()
-
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.environments.get(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- user_name=self.user_name,
- name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Environment.')
-
- if response and self.has_tags(response.tags, self.tags):
- results.append(self.format_response(response))
-
- return results
-
- def list(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.environments.list(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- user_name=self.user_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Environment.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_response(item))
-
- return results
-
- def format_response(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.resource_group,
- 'lab_name': self.lab_name,
- 'name': d.get('name'),
- 'user_name': self.user_name,
- 'id': d.get('id', None),
- 'deployment_template': d.get('deployment_properties', {}).get('arm_template_id'),
- 'location': d.get('location'),
- 'provisioning_state': d.get('provisioning_state'),
- 'resource_group_id': d.get('resource_group_id'),
- 'tags': d.get('tags', None)
- }
- return d
-
-
-def main():
- AzureRMDtlEnvironmentInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabpolicy.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlabpolicy.py
deleted file mode 100644
index a52869a921..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabpolicy.py
+++ /dev/null
@@ -1,401 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlabpolicy
-version_added: "2.8"
-short_description: Manage Azure DevTest Lab Policy instance
-description:
- - Create, update and delete instance of Azure DevTest Lab Policy.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- lab_name:
- description:
- - The name of the lab.
- required: True
- policy_set_name:
- description:
- - The name of the policy set.
- required: True
- name:
- description:
- - The name of the policy.
- required: True
- description:
- description:
- - The description of the policy.
- fact_name:
- description:
- - The fact name of the policy, for example C(lab_vm_count), C(lab_vm_size), or MaxVmsAllowedPerLab.
- choices:
- - 'user_owned_lab_vm_count'
- - 'user_owned_lab_premium_vm_count'
- - 'lab_vm_count'
- - 'lab_premium_vm_count'
- - 'lab_vm_size'
- - 'gallery_image'
- - 'user_owned_lab_vm_count_in_subnet'
- - 'lab_target_cost'
- threshold:
- description:
- - The threshold of the policy (it could be either a maximum value or a list of allowed values).
- type: raw
- state:
- description:
- - Assert the state of the Policy.
- - Use C(present) to create or update a Policy and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
-- name: Create DevTest Lab Policy
- azure_rm_devtestlabpolicy:
- resource_group: myResourceGroup
- lab_name: myLab
- policy_set_name: myPolicySet
- name: myPolicy
- fact_name: user_owned_lab_vm_count
- threshold: 5
-'''
-
-RETURN = '''
-id:
- description:
- - The identifier of the resource.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/policySets/
- myPolicySet/policies/myPolicy"
-
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _snake_to_camel
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrestazure.azure_operation import AzureOperationPoller
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMDtlPolicy(AzureRMModuleBase):
- """Configuration class for an Azure RM Policy resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- lab_name=dict(
- type='str',
- required=True
- ),
- policy_set_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- description=dict(
- type='str'
- ),
- fact_name=dict(
- type='str',
- choices=['user_owned_lab_vm_count',
- 'user_owned_lab_premium_vm_count',
- 'lab_vm_count',
- 'lab_premium_vm_count',
- 'lab_vm_size',
- 'gallery_image',
- 'user_owned_lab_vm_count_in_subnet',
- 'lab_target_cost']
- ),
- threshold=dict(
- type='raw'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.lab_name = None
- self.policy_set_name = None
- self.name = None
- self.policy = dict()
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.to_do = Actions.NoAction
-
- required_if = [
- ('state', 'present', ['threshold', 'fact_name'])
- ]
-
- super(AzureRMDtlPolicy, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True,
- required_if=required_if)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- self.policy[key] = kwargs[key]
-
- if self.state == 'present':
- self.policy['status'] = 'Enabled'
- dict_camelize(self.policy, ['fact_name'], True)
- if isinstance(self.policy['threshold'], list):
- self.policy['evaluator_type'] = 'AllowedValuesPolicy'
- else:
- self.policy['evaluator_type'] = 'MaxValuePolicy'
-
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- resource_group = self.get_resource_group(self.resource_group)
-
- old_response = self.get_policy()
-
- if not old_response:
- self.log("Policy instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("Policy instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- if (not default_compare(self.policy, old_response, '', self.results)):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the Policy instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_policy()
-
- self.results['changed'] = True
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("Policy instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_policy()
- # This currently doesn't work as there is a bug in SDK / Service
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
- else:
- self.log("Policy instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if self.state == 'present':
- self.results.update({
- 'id': response.get('id', None),
- 'status': response.get('status', None)
- })
- return self.results
-
- def create_update_policy(self):
- '''
- Creates or updates Policy with the specified configuration.
-
- :return: deserialized Policy instance state dictionary
- '''
- self.log("Creating / Updating the Policy instance {0}".format(self.name))
-
- try:
- response = self.mgmt_client.policies.create_or_update(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- policy_set_name=self.policy_set_name,
- name=self.name,
- policy=self.policy)
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the Policy instance.')
- self.fail("Error creating the Policy instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_policy(self):
- '''
- Deletes specified Policy instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the Policy instance {0}".format(self.name))
- try:
- response = self.mgmt_client.policies.delete(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- policy_set_name=self.policy_set_name,
- name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the Policy instance.')
- self.fail("Error deleting the Policy instance: {0}".format(str(e)))
-
- return True
-
- def get_policy(self):
- '''
- Gets the properties of the specified Policy.
-
- :return: deserialized Policy instance state dictionary
- '''
- self.log("Checking if the Policy instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mgmt_client.policies.get(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- policy_set_name=self.policy_set_name,
- name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("Policy instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the Policy instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def default_compare(new, old, path, result):
- if new is None:
- return True
- elif isinstance(new, dict):
- if not isinstance(old, dict):
- result['compare'] = 'changed [' + path + '] old dict is null'
- return False
- for k in new.keys():
- if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
- return False
- return True
- elif isinstance(new, list):
- if not isinstance(old, list) or len(new) != len(old):
- result['compare'] = 'changed [' + path + '] length is different or null'
- return False
- if isinstance(old[0], dict):
- key = None
- if 'id' in old[0] and 'id' in new[0]:
- key = 'id'
- elif 'name' in old[0] and 'name' in new[0]:
- key = 'name'
- else:
- key = list(old[0])[0]
- new = sorted(new, key=lambda x: x.get(key, None))
- old = sorted(old, key=lambda x: x.get(key, None))
- else:
- new = sorted(new)
- old = sorted(old)
- for i in range(len(new)):
- if not default_compare(new[i], old[i], path + '/*', result):
- return False
- return True
- else:
- if path == '/location':
- new = new.replace(' ', '').lower()
- old = old.replace(' ', '').lower()
- if str(new) == str(old):
- return True
- else:
- result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
- return False
-
-
-def dict_camelize(d, path, camelize_first):
- if isinstance(d, list):
- for i in range(len(d)):
- dict_camelize(d[i], path, camelize_first)
- elif isinstance(d, dict):
- if len(path) == 1:
- old_value = d.get(path[0], None)
- if old_value is not None:
- d[path[0]] = _snake_to_camel(old_value, camelize_first)
- else:
- sd = d.get(path[0], None)
- if sd is not None:
- dict_camelize(sd, path[1:], camelize_first)
-
-
-def dict_map(d, path, map):
- if isinstance(d, list):
- for i in range(len(d)):
- dict_map(d[i], path, map)
- elif isinstance(d, dict):
- if len(path) == 1:
- old_value = d.get(path[0], None)
- if old_value is not None:
- d[path[0]] = map.get(old_value, old_value)
- else:
- sd = d.get(path[0], None)
- if sd is not None:
- dict_map(sd, path[1:], map)
-
-
-def main():
- """Main execution"""
- AzureRMDtlPolicy()
-
-
-if __name__ == '__main__':
- main()
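
The policy module above derives evaluator_type from the shape of threshold and camelizes fact_name before calling the API. A minimal sketch of that normalization; the snake_to_camel helper below is a simplified stand-in for Ansible's _snake_to_camel and the function name is illustrative only.

def build_policy_body(fact_name, threshold):
    def snake_to_camel(value):
        # simplified stand-in for _snake_to_camel(value, capitalize_first=True)
        return ''.join(part.capitalize() for part in value.split('_'))
    return {
        'status': 'Enabled',
        'fact_name': snake_to_camel(fact_name),
        'threshold': threshold,
        # a list of allowed values vs. a single maximum value
        'evaluator_type': 'AllowedValuesPolicy' if isinstance(threshold, list) else 'MaxValuePolicy',
    }

print(build_policy_body('user_owned_lab_vm_count', 5))
print(build_policy_body('lab_vm_size', ['Standard_A2_v2', 'Standard_D4']))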
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabpolicy_info.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlabpolicy_info.py
deleted file mode 100644
index 7df4596850..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabpolicy_info.py
+++ /dev/null
@@ -1,243 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlabpolicy_info
-version_added: "2.9"
-short_description: Get Azure DTL Policy facts
-description:
- - Get facts of Azure DTL Policy.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- type: str
- lab_name:
- description:
- - The name of the lab.
- required: True
- type: str
- policy_set_name:
- description:
- - The name of the policy set.
- required: True
- type: str
- name:
- description:
- - The name of the policy.
- type: str
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- type: list
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of Policy
- azure_rm_devtestlabpolicy_info:
- resource_group: myResourceGroup
- lab_name: myLab
- policy_set_name: myPolicySet
- name: myPolicy
-'''
-
-RETURN = '''
-policies:
- description:
- - A list of dictionaries containing facts for Policy.
- returned: always
- type: complex
- contains:
- id:
- description:
- - The identifier of the policy.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/po
- licysets/myPolicySet/policies/myPolicy"
- resource_group:
- description:
- - Name of the resource group.
- returned: always
- type: str
- sample: myResourceGroup
- lab_name:
- description:
- - Name of the lab.
- returned: always
- type: str
- sample: myLab
- name:
- description:
- - The name of the policy.
- returned: always
- type: str
- sample: myPolicy
- fact_name:
- description:
- - The name of the policy fact.
- returned: always
- type: str
- sample: UserOwnedLabVmCount
- evaluator_type:
- description:
- - Evaluator type for policy fact.
- returned: always
- type: str
- sample: MaxValuePolicy
- threshold:
- description:
- - Fact's threshold.
- returned: always
- type: str
- sample: 5
- tags:
- description:
- - The tags of the resource.
- returned: always
- type: complex
- sample: "{ 'MyTag': 'MyValue' }"
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMDtlPolicyInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- lab_name=dict(
- type='str',
- required=True
- ),
- policy_set_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.lab_name = None
- self.policy_set_name = None
- self.name = None
- self.tags = None
- super(AzureRMDtlPolicyInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_devtestlabpolicy_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_devtestlabpolicy_facts' module has been renamed to 'azure_rm_devtestlabpolicy_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if self.name:
- self.results['policies'] = self.get()
- else:
- self.results['policies'] = self.list()
-
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.policies.get(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- policy_set_name=self.policy_set_name,
- name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Policy.')
-
- if response and self.has_tags(response.tags, self.tags):
- results.append(self.format_response(response))
-
- return results
-
- def list(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.policies.list(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- policy_set_name=self.policy_set_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Policy.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_response(item))
-
- return results
-
- def format_response(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.resource_group,
- 'policy_set_name': self.policy_set_name,
- 'name': d.get('name'),
- 'id': d.get('id'),
- 'tags': d.get('tags'),
- 'status': d.get('status'),
- 'threshold': d.get('threshold'),
- 'fact_name': d.get('fact_name'),
- 'evaluator_type': d.get('evaluator_type')
- }
- return d
-
-
-def main():
- AzureRMDtlPolicyInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule.py
deleted file mode 100644
index 983e03675b..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule.py
+++ /dev/null
@@ -1,341 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlabschedule
-version_added: "2.8"
-short_description: Manage Azure DevTest Lab Schedule instance
-description:
- - Create, update and delete instance of Azure DevTest Lab Schedule.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- lab_name:
- description:
- - The name of the lab.
- required: True
- name:
- description:
- - The name of the schedule.
- required: True
- choices:
- - lab_vms_startup
- - lab_vms_shutdown
- time:
- description:
- - The time of day the schedule will occur.
- time_zone_id:
- description:
- - The time zone ID.
- state:
- description:
- - Assert the state of the Schedule.
- - Use C(present) to create or update a Schedule and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Create (or update) DevTest Lab Schedule
- azure_rm_devtestlabschedule:
- resource_group: myResourceGroup
- lab_name: myLab
- name: lab_vms_shutdown
- time: "1030"
- time_zone_id: "UTC+12"
-'''
-
-RETURN = '''
-id:
- description:
- - The identifier of the resource.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/schedules/l
- abVmsShutdown"
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _snake_to_camel
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrestazure.azure_operation import AzureOperationPoller
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMSchedule(AzureRMModuleBase):
- """Configuration class for an Azure RM Schedule resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- lab_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True,
- choices=['lab_vms_startup', 'lab_vms_shutdown']
- ),
- time=dict(
- type='str'
- ),
- time_zone_id=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.lab_name = None
- self.name = None
- self.schedule = dict()
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.to_do = Actions.NoAction
-
- required_if = [
- ('state', 'present', ['time', 'time_zone_id'])
- ]
-
- super(AzureRMSchedule, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True,
- required_if=required_if)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- self.schedule[key] = kwargs[key]
-
- self.schedule['status'] = "Enabled"
-
- if self.name == 'lab_vms_startup':
- self.name = 'LabVmsStartup'
- self.schedule['task_type'] = 'LabVmsStartupTask'
- elif self.name == 'lab_vms_shutdown':
- self.name = 'LabVmsShutdown'
- self.schedule['task_type'] = 'LabVmsShutdownTask'
-
- if self.state == 'present':
- self.schedule['daily_recurrence'] = {'time': self.schedule.pop('time')}
- self.schedule['time_zone_id'] = self.schedule['time_zone_id'].upper()
-
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- resource_group = self.get_resource_group(self.resource_group)
-
- old_response = self.get_schedule()
-
- if not old_response:
- self.log("Schedule instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("Schedule instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- if (not default_compare(self.schedule, old_response, '', self.results)):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the Schedule instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_schedule()
-
- self.results['changed'] = True
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("Schedule instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_schedule()
- # This currently doesn't work as there is a bug in SDK / Service
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
- else:
- self.log("Schedule instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if self.state == 'present':
- self.results.update({
- 'id': response.get('id', None)
- })
- return self.results
-
- def create_update_schedule(self):
- '''
- Creates or updates Schedule with the specified configuration.
-
- :return: deserialized Schedule instance state dictionary
- '''
- self.log("Creating / Updating the Schedule instance {0}".format(self.name))
-
- try:
- response = self.mgmt_client.schedules.create_or_update(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name,
- schedule=self.schedule)
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the Schedule instance.')
- self.fail("Error creating the Schedule instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_schedule(self):
- '''
- Deletes specified Schedule instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the Schedule instance {0}".format(self.name))
- try:
- response = self.mgmt_client.schedules.delete(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the Schedule instance.')
- self.fail("Error deleting the Schedule instance: {0}".format(str(e)))
-
- return True
-
- def get_schedule(self):
- '''
- Gets the properties of the specified Schedule.
-
- :return: deserialized Schedule instance state dictionary
- '''
- self.log("Checking if the Schedule instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mgmt_client.schedules.get(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("Schedule instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the Schedule instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def default_compare(new, old, path, result):
- if new is None:
- return True
- elif isinstance(new, dict):
- if not isinstance(old, dict):
- result['compare'] = 'changed [' + path + '] old dict is null'
- return False
- for k in new.keys():
- if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
- return False
- return True
- elif isinstance(new, list):
- if not isinstance(old, list) or len(new) != len(old):
- result['compare'] = 'changed [' + path + '] length is different or null'
- return False
- if isinstance(old[0], dict):
- key = None
- if 'id' in old[0] and 'id' in new[0]:
- key = 'id'
- elif 'name' in old[0] and 'name' in new[0]:
- key = 'name'
- else:
- key = list(old[0])[0]
- new = sorted(new, key=lambda x: x.get(key, None))
- old = sorted(old, key=lambda x: x.get(key, None))
- else:
- new = sorted(new)
- old = sorted(old)
- for i in range(len(new)):
- if not default_compare(new[i], old[i], path + '/*', result):
- return False
- return True
- else:
- if path == '/location':
- new = new.replace(' ', '').lower()
- old = old.replace(' ', '').lower()
- if new == old:
- return True
- else:
- result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
- return False
-
-
-def main():
- """Main execution"""
- AzureRMSchedule()
-
-
-if __name__ == '__main__':
- main()
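
The schedule module above accepts a friendly snake_case name and rewrites it into the DTL schedule name, task type, daily recurrence, and upper-cased time zone before calling create_or_update. A self-contained sketch of that mapping (function and variable names are illustrative only):

def build_schedule(name, time, time_zone_id):
    mapping = {
        'lab_vms_startup': ('LabVmsStartup', 'LabVmsStartupTask'),
        'lab_vms_shutdown': ('LabVmsShutdown', 'LabVmsShutdownTask'),
    }
    dtl_name, task_type = mapping[name]
    schedule = {
        'status': 'Enabled',
        'task_type': task_type,
        'daily_recurrence': {'time': time},
        'time_zone_id': time_zone_id.upper(),
    }
    return dtl_name, schedule

print(build_schedule('lab_vms_shutdown', '1030', 'utc+12'))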
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule_info.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule_info.py
deleted file mode 100644
index f620d2c8e1..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule_info.py
+++ /dev/null
@@ -1,222 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlabschedule_info
-version_added: "2.9"
-short_description: Get Azure Schedule facts
-description:
- - Get facts of Azure Schedule.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- type: str
- lab_name:
- description:
- - The name of the lab.
- required: True
- type: str
- name:
- description:
- - The name of the schedule.
- type: str
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- type: list
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of Schedule
- azure_rm_devtestlabschedule_info:
- resource_group: myResourceGroup
- lab_name: myLab
- name: mySchedule
-'''
-
-RETURN = '''
-schedules:
- description:
- - A list of dictionaries containing facts for Schedule.
- returned: always
- type: complex
- contains:
- id:
- description:
- - The identifier of the schedule.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DevTestLab/labs/myLab/sc
- hedules/labvmsshutdown"
- resource_group:
- description:
- - Name of the resource group.
- returned: always
- type: str
- sample: myResourceGroup
- lab_name:
- description:
- - Name of the lab.
- returned: always
- type: str
- sample: myLab
- name:
- description:
- - The name of the schedule.
- returned: always
- type: str
- sample: lab_vms_shutdown
- time:
- description:
- - Time of the schedule.
- returned: always
- type: str
- sample: "1030"
- time_zone_id:
- description:
- - Time zone id.
- returned: always
- type: str
- sample: UTC+12
- tags:
- description:
- - The tags of the resource.
- returned: always
- type: complex
- sample: "{ 'MyTag': 'MyValue' }"
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _camel_to_snake, _snake_to_camel
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMDtlScheduleInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- lab_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.lab_name = None
- self.name = None
- self.tags = None
- super(AzureRMDtlScheduleInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_devtestlabschedule_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_devtestlabschedule_facts' module has been renamed to 'azure_rm_devtestlabschedule_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
- if self.name:
- self.results['schedules'] = self.get()
- else:
- self.results['schedules'] = self.list()
-
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.schedules.get(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=_snake_to_camel(self.name))
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Schedule.')
-
- if response and self.has_tags(response.tags, self.tags):
- results.append(self.format_response(response))
-
- return results
-
- def list(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.schedules.list(resource_group_name=self.resource_group,
- lab_name=self.lab_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Schedule.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_response(item))
-
- return results
-
- def format_response(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.resource_group,
- 'lab_name': self.lab_name,
- 'name': _camel_to_snake(d.get('name')),
- 'id': d.get('id', None),
- 'tags': d.get('tags', None),
- 'time': d.get('daily_recurrence', {}).get('time'),
- 'time_zone_id': d.get('time_zone_id')
- }
- return d
-
-
-def main():
- AzureRMDtlScheduleInfo()
-
-
-if __name__ == '__main__':
- main()
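
The info module above translates schedule names in both directions: the user-facing snake_case name is camelized before the API call in get(), and API names are converted back in format_response(). The helpers below are simplified stand-ins for Ansible's _snake_to_camel and _camel_to_snake, shown only to illustrate the round trip:

import re

def snake_to_camel(value):
    parts = value.split('_')
    return parts[0] + ''.join(part.capitalize() for part in parts[1:])

def camel_to_snake(value):
    # insert an underscore before every capital letter that is not at the start
    return re.sub(r'(?<!^)(?=[A-Z])', '_', value).lower()

print(snake_to_camel('lab_vms_shutdown'))   # labVmsShutdown
print(camel_to_snake('LabVmsShutdown'))     # lab_vms_shutdown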
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine.py
deleted file mode 100644
index e099cfef1d..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine.py
+++ /dev/null
@@ -1,544 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlabvirtualmachine
-version_added: "2.8"
-short_description: Manage Azure DevTest Lab Virtual Machine instance
-description:
- - Create, update and delete instance of Azure DevTest Lab Virtual Machine.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- lab_name:
- description:
- - The name of the lab.
- required: True
- name:
- description:
- - The name of the virtual machine.
- required: True
- notes:
- description:
- - The notes of the virtual machine.
- os_type:
- description:
- - Base type of operating system.
- choices:
- - windows
- - linux
- vm_size:
- description:
- - A valid Azure VM size value. For example, C(Standard_D4).
- - The list of choices varies depending on the subscription and location. Check your subscription for available choices.
- - Available values can be found at U(https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-general).
- - Required when I(state=present).
- user_name:
- description:
- - The user name of the virtual machine.
- password:
- description:
- - The password of the virtual machine administrator.
- ssh_key:
- description:
- - The SSH key of the virtual machine administrator.
- lab_subnet:
- description:
- - An existing subnet within lab's virtual network.
- - It can be the subnet's resource id.
- - It can be a dict which contains C(virtual_network_name) and C(name).
- disallow_public_ip_address:
- description:
- - Indicates whether the virtual machine is to be created without a public IP address.
- artifacts:
- description:
- - The artifacts to be installed on the virtual machine.
- type: list
- suboptions:
- source_name:
- description:
- - The artifact's source name.
- source_path:
- description:
- - The artifact's path in the source repository.
- parameters:
- description:
- - The parameters of the artifact.
- type: list
- suboptions:
- name:
- description:
- - The name of the artifact parameter.
- value:
- description:
- - The value of the artifact parameter.
- image:
- description:
- - The Microsoft Azure Marketplace image reference of the virtual machine.
- suboptions:
- offer:
- description:
- - The offer of the gallery image.
- publisher:
- description:
- - The publisher of the gallery image.
- sku:
- description:
- - The SKU of the gallery image.
- os_type:
- description:
- - The OS type of the gallery image.
- version:
- description:
- - The version of the gallery image.
- expiration_date:
- description:
- - The expiration date for VM.
- allow_claim:
- description:
- - Indicates whether another user can take ownership of the virtual machine.
- storage_type:
- description:
- - Storage type to use for virtual machine.
- choices:
- - standard
- - premium
- state:
- description:
- - Assert the state of the Virtual Machine.
- - Use C(present) to create or update a Virtual Machine and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Create (or update) Virtual Machine
- azure_rm_devtestlabvirtualmachine:
- resource_group: myrg
- lab_name: mylab
- name: myvm
- notes: Virtual machine notes....
- os_type: linux
- vm_size: Standard_A2_v2
- user_name: vmadmin
- password: ZSuppas$$21!
- lab_subnet:
- name: myvnSubnet
- virtual_network_name: myvn
- disallow_public_ip_address: no
- image:
- offer: UbuntuServer
- publisher: Canonical
- sku: 16.04-LTS
- os_type: Linux
- version: latest
- artifacts:
- - source_name: myartifact
- source_path: "/Artifacts/linux-install-mongodb"
- allow_claim: no
- expiration_date: "2019-02-22T01:49:12.117974Z"
-'''
-
-RETURN = '''
-id:
- description:
- - The identifier of the DTL Virtual Machine resource.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myrg/providers/microsoft.devtestlab/labs/mylab/virtualmachines/myvm
-compute_id:
- description:
- - The identifier of the underlying Compute Virtual Machine resource.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myrg/providers/microsoft.devtestlab/labs/mylab/virtualmachines/myvm
-fqdn:
- description:
- - Fully qualified domain name or IP Address of the virtual machine.
- returned: always
- type: str
- sample: myvm.eastus.cloudapp.azure.com
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _snake_to_camel
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrestazure.azure_operation import AzureOperationPoller
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMVirtualMachine(AzureRMModuleBase):
- """Configuration class for an Azure RM Virtual Machine resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- lab_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- notes=dict(
- type='str'
- ),
- os_type=dict(
- type='str',
- choices=['linux', 'windows']
- ),
- vm_size=dict(
- type='str'
- ),
- user_name=dict(
- type='str'
- ),
- password=dict(
- type='str',
- no_log=True
- ),
- ssh_key=dict(
- type='str',
- no_log=True
- ),
- lab_subnet=dict(
- type='raw'
- ),
- disallow_public_ip_address=dict(
- type='str'
- ),
- artifacts=dict(
- type='list',
- options=dict(
- artifact_id=dict(
- type='str'
- ),
- parameters=dict(
- type='list',
- options=dict(
- name=dict(
- type='str'
- ),
- value=dict(
- type='str'
- )
- )
- )
- )
- ),
- image=dict(
- type='dict',
- options=dict(
- offer=dict(
- type='str'
- ),
- publisher=dict(
- type='str'
- ),
- sku=dict(
- type='str'
- ),
- os_type=dict(
- type='str'
- ),
- version=dict(
- type='str'
- )
- )
- ),
- expiration_date=dict(
- type='str'
- ),
- allow_claim=dict(
- type='str'
- ),
- storage_type=dict(
- type='str',
- choices=['standard', 'premium']
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- required_if = [
- ('state', 'present', [
- 'image', 'lab_subnet', 'vm_size', 'os_type'])
- ]
-
- self.resource_group = None
- self.lab_name = None
- self.name = None
- self.lab_virtual_machine = dict()
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMVirtualMachine, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True,
- required_if=required_if)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- self.lab_virtual_machine[key] = kwargs[key]
-
- self.lab_virtual_machine['gallery_image_reference'] = self.lab_virtual_machine.pop('image', None)
-
- if self.lab_virtual_machine.get('artifacts') is not None:
- for artifact in self.lab_virtual_machine.get('artifacts'):
- source_name = artifact.pop('source_name')
- source_path = artifact.pop('source_path')
- template = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.DevTestLab/labs/{2}/artifactsources/{3}{4}"
- artifact['artifact_id'] = template.format(self.subscription_id, self.resource_group, self.lab_name, source_name, source_path)
-
- self.lab_virtual_machine['size'] = self.lab_virtual_machine.pop('vm_size')
- self.lab_virtual_machine['os_type'] = _snake_to_camel(self.lab_virtual_machine['os_type'], True)
-
- if self.lab_virtual_machine.get('storage_type'):
- self.lab_virtual_machine['storage_type'] = _snake_to_camel(self.lab_virtual_machine['storage_type'], True)
-
- lab_subnet = self.lab_virtual_machine.pop('lab_subnet')
-
- if isinstance(lab_subnet, str):
- vn_and_subnet = lab_subnet.split('/subnets/')
- if (len(vn_and_subnet) == 2):
- self.lab_virtual_machine['lab_virtual_network_id'] = vn_and_subnet[0]
- self.lab_virtual_machine['lab_subnet_name'] = vn_and_subnet[1]
- else:
- self.fail("Invalid 'lab_subnet' resource id format")
- else:
- template = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.DevTestLab/labs/{2}/virtualnetworks/{3}"
- self.lab_virtual_machine['lab_virtual_network_id'] = template.format(self.subscription_id,
- self.resource_group,
- self.lab_name,
- lab_subnet.get('virtual_network_name'))
- self.lab_virtual_machine['lab_subnet_name'] = lab_subnet.get('name')
-
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- old_response = self.get_virtualmachine()
-
- if not old_response:
- self.log("Virtual Machine instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- # the VM location has to match the lab's location and must be set explicitly, so read it from the lab
- lab = self.get_devtestlab()
- self.lab_virtual_machine['location'] = lab['location']
- else:
- self.log("Virtual Machine instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.lab_virtual_machine['location'] = old_response['location']
-
- if old_response['size'].lower() != self.lab_virtual_machine.get('size').lower():
- self.lab_virtual_machine['size'] = old_response['size']
- self.module.warn("Property 'size' cannot be changed")
-
- if self.lab_virtual_machine.get('storage_type') is not None and \
- old_response['storage_type'].lower() != self.lab_virtual_machine.get('storage_type').lower():
- self.lab_virtual_machine['storage_type'] = old_response['storage_type']
- self.module.warn("Property 'storage_type' cannot be changed")
-
- if old_response.get('gallery_image_reference', {}) != self.lab_virtual_machine.get('gallery_image_reference', {}):
- self.lab_virtual_machine['gallery_image_reference'] = old_response['gallery_image_reference']
- self.module.warn("Property 'image' cannot be changed")
-
- # artifacts can only be specified when the vm is created; afterwards the API
- # only reports a count of "total artifacts", not their details
- if len(self.lab_virtual_machine.get('artifacts', [])) != old_response['artifact_deployment_status']['total_artifacts']:
- self.module.warn("Property 'artifacts' cannot be changed")
-
- if self.lab_virtual_machine.get('disallow_public_ip_address') is not None:
- if old_response['disallow_public_ip_address'] != self.lab_virtual_machine.get('disallow_public_ip_address'):
- self.module.warn("Property 'disallow_public_ip_address' cannot be changed")
- self.lab_virtual_machine['disallow_public_ip_address'] = old_response['disallow_public_ip_address']
-
- if self.lab_virtual_machine.get('allow_claim') is not None:
- if old_response['allow_claim'] != self.lab_virtual_machine.get('allow_claim'):
- self.module.warn("Property 'allow_claim' cannot be changed")
- self.lab_virtual_machine['allow_claim'] = old_response['allow_claim']
-
- if self.lab_virtual_machine.get('notes') is not None:
- if old_response['notes'] != self.lab_virtual_machine.get('notes'):
- self.to_do = Actions.Update
- else:
- self.lab_virtual_machine['notes'] = old_response['notes']
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the Virtual Machine instance")
-
- self.results['changed'] = True
- if self.check_mode:
- return self.results
-
- response = self.create_update_virtualmachine()
-
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("Virtual Machine instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_virtualmachine()
- else:
- self.log("Virtual Machine instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if self.state == 'present':
- self.results.update({
- 'id': response.get('id', None),
- 'compute_id': response.get('compute_id', None),
- 'fqdn': response.get('fqdn', None)
- })
- return self.results
-
- def create_update_virtualmachine(self):
- '''
- Creates or updates Virtual Machine with the specified configuration.
-
- :return: deserialized Virtual Machine instance state dictionary
- '''
- self.log("Creating / Updating the Virtual Machine instance {0}".format(self.name))
-
- try:
- response = self.mgmt_client.virtual_machines.create_or_update(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name,
- lab_virtual_machine=self.lab_virtual_machine)
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the Virtual Machine instance.')
- self.fail("Error creating the Virtual Machine instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_virtualmachine(self):
- '''
- Deletes specified Virtual Machine instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the Virtual Machine instance {0}".format(self.name))
- try:
- response = self.mgmt_client.virtual_machines.delete(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the Virtual Machine instance.')
- self.fail("Error deleting the Virtual Machine instance: {0}".format(str(e)))
-
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
-
- return True
-
- def get_virtualmachine(self):
- '''
- Gets the properties of the specified Virtual Machine.
-
- :return: deserialized Virtual Machine instance state dictionary
- '''
- self.log("Checking if the Virtual Machine instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mgmt_client.virtual_machines.get(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("Virtual Machine instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the Virtual Machine instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
- def get_devtestlab(self):
- '''
- Gets the properties of the specified DevTest Lab.
-
- :return: deserialized DevTest Lab instance state dictionary
- '''
- self.log("Checking if the DevTest Lab instance {0} is present".format(self.lab_name))
- try:
- response = self.mgmt_client.labs.get(resource_group_name=self.resource_group,
- name=self.lab_name)
- self.log("Response : {0}".format(response))
- self.log("DevTest Lab instance : {0} found".format(response.name))
- return response.as_dict()
- except CloudError as e:
- self.fail('Did not find the DevTest Lab instance.')
- return False
-
-
-def main():
- """Main execution"""
- AzureRMVirtualMachine()
-
-
-if __name__ == '__main__':
- main()
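For reference, the lab_subnet handling in the module deleted above accepts either a full subnet resource id or a dict with virtual_network_name/name, and normalizes both into the lab_virtual_network_id plus lab_subnet_name pair the DevTest Labs API expects. A minimal standalone sketch of that normalization (function and variable names are illustrative, not part of the module):

def normalize_lab_subnet(subscription_id, resource_group, lab_name, lab_subnet):
    # Illustrative helper: turn either a full subnet resource id or a
    # {virtual_network_name, name} dict into (lab_virtual_network_id, lab_subnet_name).
    if isinstance(lab_subnet, str):
        parts = lab_subnet.split('/subnets/')
        if len(parts) != 2:
            raise ValueError("Invalid 'lab_subnet' resource id format")
        return parts[0], parts[1]
    template = ("/subscriptions/{0}/resourceGroups/{1}/providers/"
                "Microsoft.DevTestLab/labs/{2}/virtualnetworks/{3}")
    vn_id = template.format(subscription_id, resource_group, lab_name,
                            lab_subnet.get('virtual_network_name'))
    return vn_id, lab_subnet.get('name')

For example, the dict form {'virtual_network_name': 'myLabVn', 'name': 'myLabVnSubnet'} would yield the lab virtual network id together with the subnet name.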
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine_info.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine_info.py
deleted file mode 100644
index e21b2388a4..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine_info.py
+++ /dev/null
@@ -1,329 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlabvirtualmachine_info
-version_added: "2.9"
-short_description: Get Azure DevTest Lab Virtual Machine facts
-description:
- - Get facts of Azure DevTest Lab Virtual Machine.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- type: str
- lab_name:
- description:
- - The name of the lab.
- required: True
- type: str
- name:
- description:
- - The name of the virtual machine.
- type: str
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- type: list
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of DTL Virtual Machine
- azure_rm_devtestlabvirtualmachine_info:
- resource_group: myResourceGroup
- lab_name: myLab
- name: myVm
-'''
-
-RETURN = '''
-virtualmachines:
- description:
- - A list of dictionaries containing facts for DevTest Lab Virtual Machine.
- returned: always
- type: complex
- contains:
- id:
- description:
- - The identifier of the virtual machine.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/virt
- ualmachines/myVm"
- resource_group:
- description:
- - Name of the resource group.
- returned: always
- type: str
- sample: myResourceGroup
- lab_name:
- description:
- - Name of the lab.
- returned: always
- type: str
- sample: myLab
- name:
- description:
- - Name of the virtual machine.
- returned: always
- type: str
- sample: myVm
- notes:
- description:
- - Notes of the virtual machine.
- returned: always
- type: str
- sample: My VM notes
- disallow_public_ip_address:
- description:
- - Whether a public IP address is disallowed.
- returned: always
- type: bool
- sample: false
- expiration_date:
- description:
- - Virtual machine expiration date.
- returned: always
- type: str
- sample: "2029-02-22T01:49:12.117974Z"
- image:
- description:
- - Gallery image reference.
- returned: always
- type: complex
- contains:
- offer:
- description:
- - The offer of the gallery image.
- returned: when created from gallery image
- type: str
- sample: UbuntuServer
- os_type:
- description:
- - Operating system type.
- returned: when created from gallery image
- type: str
- sample: Linux
- sku:
- description:
- - The SKU of the gallery image.
- returned: when created from gallery image
- type: str
- sample: 16.04-LTS
- publisher:
- description:
- - The publisher of the gallery image.
- returned: when created from gallery image
- type: str
- sample: Canonical
- version:
- description:
- - The version of the gallery image.
- returned: when created from gallery image
- type: str
- sample: latest
- os_type:
- description:
- - Operating system type.
- returned: always
- type: str
- sample: linux
- vm_size:
- description:
- - Virtual machine size.
- returned: always
- type: str
- sample: Standard_A2_v2
- user_name:
- description:
- - Admin user name.
- returned: always
- type: str
- sample: dtl_admin
- storage_type:
- description:
- - Storage type to use for virtual machine.
- returned: always
- type: str
- sample: standard
- compute_vm_id:
- description:
- - Resource id of compute virtual machine.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myLab-myVm-097933/providers/Microsoft.Compute/virtualMachines/myVm
- compute_vm_resource_group:
- description:
- - Resource group where compute virtual machine is created.
- returned: always
- type: str
- sample: myLab-myVm-097933
- compute_vm_name:
- description:
- - Name of compute virtual machine.
- returned: always
- type: str
- sample: myVm
- fqdn:
- description:
- - Fully qualified domain name.
- returned: always
- type: str
- sample: myvm.eastus.cloudapp.azure.com
- provisioning_state:
- description:
- - Provisioning state of the virtual network.
- returned: always
- type: str
- sample: Succeeded
- tags:
- description:
- - The tags of the resource.
- returned: always
- type: complex
- sample: "{ 'foo': 'bar' }"
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMDtlVirtualMachineInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- lab_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.lab_name = None
- self.name = None
- self.tags = None
- super(AzureRMDtlVirtualMachineInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_devtestlabvirtualmachine_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_devtestlabvirtualmachine_facts' module has been renamed to 'azure_rm_devtestlabvirtualmachine_info'",
- version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if self.name:
- self.results['virtualmachines'] = self.get()
- else:
- self.results['virtualmachines'] = self.list()
-
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.virtual_machines.get(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail('Could not get facts for Virtual Machine.')
-
- if response and self.has_tags(response.tags, self.tags):
- results.append(self.format_response(response))
-
- return results
-
- def list(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.virtual_machines.list(resource_group_name=self.resource_group,
- lab_name=self.lab_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail('Could not get facts for Virtual Machine.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_response(item))
- return results
-
- def format_response(self, item):
- d = item.as_dict()
- d = {
- 'id': d.get('id', None),
- 'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'),
- 'lab_name': self.parse_resource_to_dict(d.get('id')).get('name'),
- 'name': d.get('name'),
- 'notes': d.get('notes'),
- 'disallow_public_ip_address': d.get('disallow_public_ip_address'),
- 'expiration_date': d.get('expiration_date'),
- 'image': d.get('gallery_image_reference'),
- 'os_type': d.get('os_type').lower(),
- 'vm_size': d.get('size'),
- 'user_name': d.get('user_name'),
- 'storage_type': d.get('storage_type').lower(),
- 'compute_vm_id': d.get('compute_id'),
- 'compute_vm_resource_group': self.parse_resource_to_dict(d.get('compute_id')).get('resource_group'),
- 'compute_vm_name': self.parse_resource_to_dict(d.get('compute_id')).get('name'),
- 'fqdn': d.get('fqdn'),
- 'provisioning_state': d.get('provisioning_state'),
- 'tags': d.get('tags', None)
- }
- return d
-
-
-def main():
- AzureRMDtlVirtualMachineInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualnetwork.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualnetwork.py
deleted file mode 100644
index 5a2b1ef324..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualnetwork.py
+++ /dev/null
@@ -1,293 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlabvirtualnetwork
-version_added: "2.8"
-short_description: Manage Azure DevTest Lab Virtual Network instance
-description:
- - Create, update and delete instance of Azure DevTest Lab Virtual Network.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- lab_name:
- description:
- - The name of the lab.
- required: True
- name:
- description:
- - The name of the virtual network.
- required: True
- location:
- description:
- - The location of the resource.
- description:
- description:
- - The description of the virtual network.
- state:
- description:
- - Assert the state of the Virtual Network.
- - Use C(present) to create or update a Virtual Network and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Create (or update) Virtual Network
- azure_rm_devtestlabvirtualnetwork:
- resource_group: myResourceGroup
- lab_name: mylab
- name: myvn
- description: My Lab Virtual Network
-'''
-
-RETURN = '''
-id:
- description:
- - The identifier of the resource.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/testrg/providers/microsoft.devtestlab/
- mylab/mylab/virtualnetworks/myvn"
-external_provider_resource_id:
- description:
- - The identifier of external virtual network.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/testrg/providers/Microsoft.Network/vi
- rtualNetworks/myvn"
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _snake_to_camel
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrestazure.azure_operation import AzureOperationPoller
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMDevTestLabVirtualNetwork(AzureRMModuleBase):
- """Configuration class for an Azure RM Virtual Network resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- lab_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- description=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.lab_name = None
- self.name = None
- self.virtual_network = {}
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMDevTestLabVirtualNetwork, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- self.virtual_network[key] = kwargs[key]
-
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2018-10-15')
-
- resource_group = self.get_resource_group(self.resource_group)
- if self.virtual_network.get('location') is None:
- self.virtual_network['location'] = resource_group.location
-
- # subnet overrides for virtual network and subnet created by default
- template = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/virtualNetworks/{2}/subnets/{3}"
- subnet_id = template.format(self.subscription_id,
- self.resource_group,
- self.name,
- self.name + "Subnet")
- self.virtual_network['subnet_overrides'] = [{
- 'resource_id': subnet_id,
- 'lab_subnet_name': self.name + "Subnet",
- 'use_in_vm_creation_permission': 'Allow',
- 'use_public_ip_address_permission': 'Allow'
- }]
-
- old_response = self.get_virtualnetwork()
-
- if not old_response:
- self.log("Virtual Network instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("Virtual Network instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- if self.virtual_network.get('description') is not None and self.virtual_network.get('description') != old_response.get('description'):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the Virtual Network instance")
- self.results['changed'] = True
- if self.check_mode:
- return self.results
- response = self.create_update_virtualnetwork()
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("Virtual Network instance deleted")
- self.results['changed'] = True
- if self.check_mode:
- return self.results
- self.delete_virtualnetwork()
- # This currently doesn't work as there is a bug in SDK / Service
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
- else:
- self.log("Virtual Network instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if self.state == 'present':
- self.results.update({
- 'id': response.get('id', None),
- 'external_provider_resource_id': response.get('external_provider_resource_id', None)
- })
- return self.results
-
- def create_update_virtualnetwork(self):
- '''
- Creates or updates Virtual Network with the specified configuration.
-
- :return: deserialized Virtual Network instance state dictionary
- '''
- self.log("Creating / Updating the Virtual Network instance {0}".format(self.name))
-
- try:
- response = self.mgmt_client.virtual_networks.create_or_update(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name,
- virtual_network=self.virtual_network)
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the Virtual Network instance.')
- self.fail("Error creating the Virtual Network instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_virtualnetwork(self):
- '''
- Deletes specified Virtual Network instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the Virtual Network instance {0}".format(self.name))
- try:
- response = self.mgmt_client.virtual_networks.delete(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the Virtual Network instance.')
- self.fail("Error deleting the Virtual Network instance: {0}".format(str(e)))
-
- return True
-
- def get_virtualnetwork(self):
- '''
- Gets the properties of the specified Virtual Network.
-
- :return: deserialized Virtual Network instance state dictionary
- '''
- self.log("Checking if the Virtual Network instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mgmt_client.virtual_networks.get(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("Virtual Network instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the Virtual Network instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMDevTestLabVirtualNetwork()
-
-
-if __name__ == '__main__':
- main()
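The module deleted above always attaches a subnet override for the default "<name>Subnet" subnet it creates alongside the lab virtual network, allowing both VM creation and public IP use on it. A rough standalone sketch of that default (names are illustrative, not part of the module):

def default_subnet_override(subscription_id, resource_group, vn_name):
    # Illustrative: allow VM creation and public IPs on the "<name>Subnet"
    # subnet created with the lab virtual network.
    subnet_id = ("/subscriptions/{0}/resourceGroups/{1}/providers/"
                 "Microsoft.Network/virtualNetworks/{2}/subnets/{2}Subnet").format(
        subscription_id, resource_group, vn_name)
    return [{
        'resource_id': subnet_id,
        'lab_subnet_name': vn_name + "Subnet",
        'use_in_vm_creation_permission': 'Allow',
        'use_public_ip_address_permission': 'Allow',
    }]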
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualnetwork_info.py b/lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualnetwork_info.py
deleted file mode 100644
index c96941c6e8..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualnetwork_info.py
+++ /dev/null
@@ -1,221 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_devtestlabvirtualnetwork_info
-version_added: "2.9"
-short_description: Get Azure DevTest Lab Virtual Network facts
-description:
- - Get facts of Azure DevTest Lab Virtual Network.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- type: str
- lab_name:
- description:
- - The name of DevTest Lab.
- required: True
- type: str
- name:
- description:
- - The name of DevTest Lab Virtual Network.
- type: str
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of DevTest Lab Virtual Network
- azure_rm_devtestlabvirtualnetwork_info:
- resource_group: myResourceGroup
- lab_name: myLab
- name: myVirtualNetwork
-
- - name: List all Virtual Networks in DevTest Lab
- azure_rm_devtestlabvirtualnetwork_info:
- resource_group: myResourceGroup
- lab_name: myLab
-'''
-
-RETURN = '''
-virtualnetworks:
- description:
- - A list of dictionaries containing facts for DevTest Lab Virtual Network.
- returned: always
- type: complex
- contains:
- id:
- description:
- - The identifier of the virtual network.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/virt
- ualnetworks/myVirtualNetwork"
- resource_group:
- description:
- - Name of the resource group.
- returned: always
- type: str
- sample: myResourceGroup
- lab_name:
- description:
- - Name of the lab.
- returned: always
- type: str
- sample: myLab
- name:
- description:
- - Name of the virtual network.
- returned: always
- type: str
- sample: myVirtualNetwork
- description:
- description:
- - Description of the virtual network.
- returned: always
- type: str
- sample: My Virtual Network
- external_provider_resource_id:
- description:
- - Resource id of an external virtual network.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/my
- VirtualNetwork"
- provisioning_state:
- description:
- - Provisioning state of the virtual network.
- returned: always
- type: str
- sample: Succeeded
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.devtestlabs import DevTestLabsClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMDevTestLabVirtualNetworkInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- lab_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.lab_name = None
- self.name = None
- super(AzureRMDevTestLabVirtualNetworkInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_devtestlabvirtualnetwork_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_devtestlabvirtualnetwork_facts' module has been renamed to 'azure_rm_devtestlabvirtualnetwork_info'",
- version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if self.name:
- self.results['virtualnetworks'] = self.get()
- else:
- self.results['virtualnetworks'] = self.list()
-
- return self.results
-
- def list(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.virtual_networks.list(resource_group_name=self.resource_group,
- lab_name=self.lab_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail('Could not list Virtual Networks for DevTest Lab.')
-
- if response is not None:
- for item in response:
- results.append(self.format_response(item))
-
- return results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.virtual_networks.get(resource_group_name=self.resource_group,
- lab_name=self.lab_name,
- name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail('Could not get facts for Virtual Network.')
-
- if response:
- results.append(self.format_response(response))
-
- return results
-
- def format_response(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.resource_group,
- 'lab_name': self.lab_name,
- 'name': d.get('name', None),
- 'id': d.get('id', None),
- 'external_provider_resource_id': d.get('external_provider_resource_id', None),
- 'provisioning_state': d.get('provisioning_state', None),
- 'description': d.get('description', None)
- }
- return d
-
-
-def main():
- AzureRMDevTestLabVirtualNetworkInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset.py b/lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset.py
deleted file mode 100644
index c1ae72fd85..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset.py
+++ /dev/null
@@ -1,485 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Obezimnaka Boms, <t-ozboms@microsoft.com>
-# Copyright (c) 2017 Ansible Project
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_dnsrecordset
-
-version_added: "2.4"
-
-short_description: Create, delete and update DNS record sets and records
-
-description:
- - Creates, deletes, and updates DNS records sets and records within an existing Azure DNS Zone.
-
-options:
- resource_group:
- description:
- - Name of resource group.
- required: true
- zone_name:
- description:
- - Name of the existing DNS zone in which to manage the record set.
- required: true
- relative_name:
- description:
- - Relative name of the record set.
- required: true
- record_type:
- description:
- - The type of record set to create or delete.
- choices:
- - A
- - AAAA
- - CNAME
- - MX
- - NS
- - SRV
- - TXT
- - PTR
- - CAA
- - SOA
- required: true
- record_mode:
- description:
- - Whether existing record values not sent to the module should be purged.
- default: purge
- choices:
- - append
- - purge
- state:
- description:
- - Assert the state of the record set. Use C(present) to create or update and C(absent) to delete.
- default: present
- choices:
- - absent
- - present
- time_to_live:
- description:
- - Time to live of the record set in seconds.
- default: 3600
- records:
- description:
- - List of records to be created depending on the type of record (set).
- suboptions:
- preference:
- description:
- - Used for creating an C(MX) record set/records.
- priority:
- description:
- - Used for creating an C(SRV) record set/records.
- weight:
- description:
- - Used for creating an C(SRV) record set/records.
- port:
- description:
- - Used for creating an C(SRV) record set/records.
- entry:
- description:
- - Primary data value for all record types.
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Obezimnaka Boms (@ozboms)
- - Matt Davis (@nitzmahone)
-'''
-
-EXAMPLES = '''
-
-- name: ensure an "A" record set with multiple records
- azure_rm_dnsrecordset:
- resource_group: myResourceGroup
- relative_name: www
- zone_name: testing.com
- record_type: A
- records:
- - entry: 192.168.100.101
- - entry: 192.168.100.102
- - entry: 192.168.100.103
-
-- name: delete a record set
- azure_rm_dnsrecordset:
- resource_group: myResourceGroup
- record_type: A
- relative_name: www
- zone_name: testing.com
- state: absent
-
-- name: create multiple "A" record sets with multiple records
- azure_rm_dnsrecordset:
- resource_group: myResourceGroup
- zone_name: testing.com
- relative_name: "{{ item.name }}"
- record_type: "{{ item.type }}"
- records: "{{ item.records }}"
- with_items:
- - { name: 'servera', type: 'A', records: [ { entry: '10.10.10.20' }, { entry: '10.10.10.21' }] }
- - { name: 'serverb', type: 'A', records: [ { entry: '10.10.10.30' }, { entry: '10.10.10.41' }] }
- - { name: 'serverc', type: 'A', records: [ { entry: '10.10.10.40' }, { entry: '10.10.10.41' }] }
-
-- name: create SRV records in a new record set
- azure_rm_dnsrecordset:
- resource_group: myResourceGroup
- relative_name: _sip._tcp.testing.com
- zone_name: testing.com
- time_to_live: 7200
- record_type: SRV
- records:
- - entry: sip.testing.com
- preference: 10
- priority: 20
- weight: 10
- port: 5060
-
-- name: create PTR record in a new record set
- azure_rm_dnsrecordset:
- resource_group: myResourceGroup
- relative_name: 192.168.100.101.in-addr.arpa
- zone_name: testing.com
- record_type: PTR
- records:
- - entry: servera.testing.com
-
-- name: create TXT record in a new record set
- azure_rm_dnsrecordset:
- resource_group: myResourceGroup
- relative_name: mail.testing.com
- zone_name: testing.com
- record_type: TXT
- records:
- - entry: 'v=spf1 a -all'
-
-'''
-
-RETURN = '''
-state:
- description:
- - Current state of the DNS record set.
- returned: always
- type: complex
- contains:
- id:
- description:
- - The DNS record set ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxx......xxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/dnszones/b57dc95985712e4523282.com/A/www"
- name:
- description:
- - Relative name of the record set.
- returned: always
- type: str
- sample: 'www'
- fqdn:
- description:
- - Fully qualified domain name of the record set.
- returned: always
- type: str
- sample: www.b57dc95985712e4523282.com
- etag:
- description:
- - The etag of the record set.
- returned: always
- type: str
- sample: 692c3e92-a618-46fc-aecd-8f888807cd6c
- provisioning_state:
- description:
- - The DNS record set state.
- returned: always
- type: str
- sample: Succeeded
- target_resource:
- description:
- - The target resource of the record set.
- returned: always
- type: dict
- sample: {}
- ttl:
- description:
- - The TTL (time-to-live) of the records in the record set.
- returned: always
- type: int
- sample: 3600
- type:
- description:
- - The type of DNS record in this record set.
- returned: always
- type: str
- sample: A
- arecords:
- description:
- - A list of records in the record set.
- returned: always
- type: list
- sample: [
- {
- "ipv4_address": "192.0.2.2"
- },
- {
- "ipv4_address": "192.0.2.4"
- },
- {
- "ipv4_address": "192.0.2.8"
- }
- ]
-'''
-
-import inspect
-import sys
-
-from ansible.module_utils.basic import _load_params
-from ansible.module_utils.six import iteritems
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, HAS_AZURE
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-RECORD_ARGSPECS = dict(
- A=dict(
- ipv4_address=dict(type='str', required=True, aliases=['entry'])
- ),
- AAAA=dict(
- ipv6_address=dict(type='str', required=True, aliases=['entry'])
- ),
- CNAME=dict(
- cname=dict(type='str', required=True, aliases=['entry'])
- ),
- MX=dict(
- preference=dict(type='int', required=True),
- exchange=dict(type='str', required=True, aliases=['entry'])
- ),
- NS=dict(
- nsdname=dict(type='str', required=True, aliases=['entry'])
- ),
- PTR=dict(
- ptrdname=dict(type='str', required=True, aliases=['entry'])
- ),
- SRV=dict(
- priority=dict(type='int', required=True),
- port=dict(type='int', required=True),
- weight=dict(type='int', required=True),
- target=dict(type='str', required=True, aliases=['entry'])
- ),
- TXT=dict(
- value=dict(type='list', required=True, aliases=['entry'])
- ),
- SOA=dict(
- host=dict(type='str', aliases=['entry']),
- email=dict(type='str'),
- serial_number=dict(type='long'),
- refresh_time=dict(type='long'),
- retry_time=dict(type='long'),
- expire_time=dict(type='long'),
- minimum_ttl=dict(type='long')
- ),
- CAA=dict(
- value=dict(type='str', aliases=['entry']),
- flags=dict(type='int'),
- tag=dict(type='str')
- )
- # FUTURE: ensure all record types are supported (see https://github.com/Azure/azure-sdk-for-python/tree/master/azure-mgmt-dns/azure/mgmt/dns/models)
-)
-
-RECORDSET_VALUE_MAP = dict(
- A=dict(attrname='arecords', classobj='ARecord', is_list=True),
- AAAA=dict(attrname='aaaa_records', classobj='AaaaRecord', is_list=True),
- CNAME=dict(attrname='cname_record', classobj='CnameRecord', is_list=False),
- MX=dict(attrname='mx_records', classobj='MxRecord', is_list=True),
- NS=dict(attrname='ns_records', classobj='NsRecord', is_list=True),
- PTR=dict(attrname='ptr_records', classobj='PtrRecord', is_list=True),
- SRV=dict(attrname='srv_records', classobj='SrvRecord', is_list=True),
- TXT=dict(attrname='txt_records', classobj='TxtRecord', is_list=True),
- SOA=dict(attrname='soa_record', classobj='SoaRecord', is_list=False),
- CAA=dict(attrname='caa_records', classobj='CaaRecord', is_list=True)
- # FUTURE: add missing record types from https://github.com/Azure/azure-sdk-for-python/blob/master/azure-mgmt-dns/azure/mgmt/dns/models/record_set.py
-) if HAS_AZURE else {}
-
-
-class AzureRMRecordSet(AzureRMModuleBase):
-
- def __init__(self):
-
- # two-pass arg validation: peek at the raw params here so the record-type specific subspec can be patched in before the real validation pass
- _load_params()
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- relative_name=dict(type='str', required=True),
- zone_name=dict(type='str', required=True),
- record_type=dict(choices=RECORD_ARGSPECS.keys(), required=True, type='str'),
- record_mode=dict(choices=['append', 'purge'], default='purge'),
- state=dict(choices=['present', 'absent'], default='present', type='str'),
- time_to_live=dict(type='int', default=3600),
- records=dict(type='list', elements='dict')
- )
-
- required_if = [
- ('state', 'present', ['records'])
- ]
-
- self.results = dict(
- changed=False
- )
-
- # first-pass arg validation so we can get the record type- skip exec_module
- super(AzureRMRecordSet, self).__init__(self.module_arg_spec, required_if=required_if, supports_check_mode=True, skip_exec=True)
-
- # look up the right subspec and metadata
- record_subspec = RECORD_ARGSPECS.get(self.module.params['record_type'])
-
- # patch the right record shape onto the argspec
- self.module_arg_spec['records']['options'] = record_subspec
-
- self.resource_group = None
- self.relative_name = None
- self.zone_name = None
- self.record_type = None
- self.record_mode = None
- self.state = None
- self.time_to_live = None
- self.records = None
-
- # rerun validation and actually run the module this time
- super(AzureRMRecordSet, self).__init__(self.module_arg_spec, required_if=required_if, supports_check_mode=True)
-
- def exec_module(self, **kwargs):
- for key in self.module_arg_spec.keys():
- setattr(self, key, kwargs[key])
-
- zone = self.dns_client.zones.get(self.resource_group, self.zone_name)
- if not zone:
- self.fail('The zone {0} does not exist in the resource group {1}'.format(self.zone_name, self.resource_group))
-
- try:
- self.log('Fetching Record Set {0}'.format(self.relative_name))
- record_set = self.dns_client.record_sets.get(self.resource_group, self.zone_name, self.relative_name, self.record_type)
- self.results['state'] = self.recordset_to_dict(record_set)
- except CloudError:
- record_set = None
- # FUTURE: fail on anything other than ResourceNotFound
-
- record_type_metadata = RECORDSET_VALUE_MAP.get(self.record_type)
-
- # FUTURE: implement diff mode
-
- if self.state == 'present':
- # convert the input records to SDK objects
- self.input_sdk_records = self.create_sdk_records(self.records, self.record_type)
-
- if not record_set:
- changed = True
- else:
- # and use it to get the type-specific records
- server_records = getattr(record_set, record_type_metadata.get('attrname'))
-
- # compare the input records to the server records
- self.input_sdk_records, changed = self.records_changed(self.input_sdk_records, server_records)
-
- # also check top-level recordset properties
- changed |= record_set.ttl != self.time_to_live
-
- # FUTURE: add metadata/tag check on recordset
-
- self.results['changed'] |= changed
-
- elif self.state == 'absent':
- if record_set:
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- if self.results['changed']:
- if self.state == 'present':
- record_set_args = dict(
- ttl=self.time_to_live
- )
-
- record_set_args[record_type_metadata['attrname']] = self.input_sdk_records if record_type_metadata['is_list'] else self.input_sdk_records[0]
-
- record_set = self.dns_models.RecordSet(**record_set_args)
-
- self.results['state'] = self.create_or_update(record_set)
-
- elif self.state == 'absent':
- # delete record set
- self.delete_record_set()
-
- return self.results
-
- def create_or_update(self, record_set):
- try:
- record_set = self.dns_client.record_sets.create_or_update(resource_group_name=self.resource_group,
- zone_name=self.zone_name,
- relative_record_set_name=self.relative_name,
- record_type=self.record_type,
- parameters=record_set)
- return self.recordset_to_dict(record_set)
- except Exception as exc:
- self.fail("Error creating or updating dns record {0} - {1}".format(self.relative_name, exc.message or str(exc)))
-
- def delete_record_set(self):
- try:
- # delete the record set
- self.dns_client.record_sets.delete(resource_group_name=self.resource_group,
- zone_name=self.zone_name,
- relative_record_set_name=self.relative_name,
- record_type=self.record_type)
- except Exception as exc:
- self.fail("Error deleting record set {0} - {1}".format(self.relative_name, exc.message or str(exc)))
- return None
-
- def create_sdk_records(self, input_records, record_type):
- record = RECORDSET_VALUE_MAP.get(record_type)
- if not record:
- self.fail('record type {0} is not supported now'.format(record_type))
- record_sdk_class = getattr(self.dns_models, record.get('classobj'))
- return [record_sdk_class(**x) for x in input_records]
-
- def records_changed(self, input_records, server_records):
- # ensure we're always comparing a list, even for the single-valued types
- if not isinstance(server_records, list):
- server_records = [server_records]
-
- input_set = set([self.module.jsonify(x.as_dict()) for x in input_records])
- server_set = set([self.module.jsonify(x.as_dict()) for x in server_records])
-
- if self.record_mode == 'append': # only a difference if the server set is missing something from the input set
- input_set = server_set.union(input_set)
-
- # after the append-mode union (if any), any difference between the sets is a change
- changed = input_set != server_set
-
- records = [self.module.from_json(x) for x in input_set]
- return self.create_sdk_records(records, self.record_type), changed
-
- def recordset_to_dict(self, recordset):
- result = recordset.as_dict()
- # slice off the type prefix; str.strip() treats its argument as a character
- # set and would mangle record types such as MX or NS
- result['type'] = result['type'][len('Microsoft.Network/dnszones/'):]
- return result
-
-
-def main():
- AzureRMRecordSet()
-
-
-if __name__ == '__main__':
- main()
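The record_mode handling deleted above is easiest to see in isolation: desired and server records are compared as sets of their serialized dict forms, and C(append) only ever adds to what the server already has. A rough standalone sketch of that comparison, using plain dicts instead of SDK record objects (names are illustrative):

import json

def records_changed(input_records, server_records, record_mode='purge'):
    # Illustrative sketch: compare desired vs. server records as sets of their
    # JSON-serialized forms, mirroring the module's set-based comparison.
    if not isinstance(server_records, list):
        server_records = [server_records]
    input_set = set(json.dumps(r, sort_keys=True) for r in input_records)
    server_set = set(json.dumps(r, sort_keys=True) for r in server_records)
    if record_mode == 'append':
        # append mode: the desired state is "server records plus anything new"
        input_set = server_set.union(input_set)
    changed = input_set != server_set
    return [json.loads(r) for r in input_set], changed

With server records [{'ipv4_address': '10.0.0.1'}] and input [{'ipv4_address': '10.0.0.2'}], purge mode reports a change that replaces the record, while append mode reports a change that keeps both.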
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset_info.py b/lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset_info.py
deleted file mode 100644
index 1cf8a3da08..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset_info.py
+++ /dev/null
@@ -1,294 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Obezimnaka Boms, <t-ozboms@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_dnsrecordset_info
-
-version_added: "2.9"
-
-short_description: Get DNS Record Set facts
-
-description:
- - Get facts for a specific DNS record set in a zone, for all record sets of a given type in a zone, or for all record sets in a zone.
-
-options:
- relative_name:
- description:
- - Only show results for a Record Set.
- resource_group:
- description:
- - Limit results by resource group. Required when filtering by name or type.
- zone_name:
- description:
- - Limit results by zones. Required when filtering by name or type.
- record_type:
- description:
- - Limit record sets by record type.
- top:
- description:
- - Limit the maximum number of record sets to return.
- type: int
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Ozi Boms (@ozboms)
-
-'''
-
-EXAMPLES = '''
-- name: Get facts for one Record Set
- azure_rm_dnsrecordset_info:
- resource_group: myResourceGroup
- zone_name: example.com
- relative_name: server10
- record_type: A
-- name: Get facts for all Type A Record Sets in a Zone
- azure_rm_dnsrecordset_info:
- resource_group: myResourceGroup
- zone_name: example.com
- record_type: A
-- name: Get all record sets in one zone
- azure_rm_dnsrecordset_info:
- resource_group: myResourceGroup
- zone_name: example.com
-'''
-
-RETURN = '''
-azure_dnsrecordset:
- description:
- - List of record set dicts.
- returned: always
- type: list
- example: [
- {
- "etag": "60ac0480-44dd-4881-a2ed-680d20b3978e",
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/dnszones/newzone.com/A/servera",
- "name": "servera",
- "properties": {
- "ARecords": [
- {
- "ipv4Address": "10.4.5.7"
- },
- {
- "ipv4Address": "2.4.5.8"
- }
- ],
- "TTL": 12900
- },
- "type": "Microsoft.Network/dnszones/A"
- }]
-dnsrecordsets:
- description:
- - List of record set dicts, which share the same structure as the M(azure_rm_dnsrecordset) module's parameters.
- returned: always
- type: list
- contains:
- id:
- description:
- - ID of the dns recordset.
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/dnszones/newzone.
- com/A/servera"
- relative_name:
- description:
- - Name of the dns recordset.
- sample: servera
- record_type:
- description:
- - The type of the record set.
- - Can be C(A), C(AAAA), C(CNAME), C(MX), C(NS), C(SRV), C(TXT), C(PTR).
- sample: A
- time_to_live:
- description:
- - Time to live of the record set in seconds.
- sample: 12900
- records:
- description:
- - List of records depending on the type of recordset.
- sample: [
- {
- "ipv4Address": "10.4.5.7"
- },
- {
- "ipv4Address": "2.4.5.8"
- }
- ]
- provisioning_state:
- description:
- - Provisioning state of the resource.
- sample: Succeeded
- fqdn:
- description:
- - Fully qualified domain name of the record set.
- sample: www.newzone.com
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.common import AzureMissingResourceHttpError, AzureHttpError
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-AZURE_OBJECT_CLASS = 'RecordSet'
-
-
-RECORDSET_VALUE_MAP = dict(
- A='arecords',
- AAAA='aaaa_records',
- CNAME='cname_record',
- MX='mx_records',
- NS='ns_records',
- PTR='ptr_records',
- SRV='srv_records',
- TXT='txt_records',
- SOA='soa_record',
- CAA='caa_records'
- # FUTURE: add missing record types from https://github.com/Azure/azure-sdk-for-python/blob/master/azure-mgmt-dns/azure/mgmt/dns/models/record_set.py
-)
-
-
-class AzureRMRecordSetInfo(AzureRMModuleBase):
-
- def __init__(self):
-
- # define user inputs into argument
- self.module_arg_spec = dict(
- relative_name=dict(type='str'),
- resource_group=dict(type='str'),
- zone_name=dict(type='str'),
- record_type=dict(type='str'),
- top=dict(type='int')
- )
-
- # store the results of the module operation
- self.results = dict(
- changed=False,
- )
-
- self.relative_name = None
- self.resource_group = None
- self.zone_name = None
- self.record_type = None
- self.top = None
-
- super(AzureRMRecordSetInfo, self).__init__(self.module_arg_spec)
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_dnsrecordset_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_dnsrecordset_facts' module has been renamed to 'azure_rm_dnsrecordset_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if not self.top or self.top <= 0:
- self.top = None
-
- # create conditionals to catch errors when calling record facts
- if self.relative_name and not self.resource_group:
- self.fail("Parameter error: resource group required when filtering by name or record type.")
- if self.relative_name and not self.zone_name:
- self.fail("Parameter error: DNS Zone required when filtering by name or record type.")
-
- results = []
- # list the conditions for what to return based on input
- if self.relative_name is not None:
- # if there is a name listed, they want only facts about that specific Record Set itself
- results = self.get_item()
- elif self.record_type:
- # else, they just want all the record sets of a specific type
- results = self.list_type()
- elif self.zone_name:
- # if there is a zone name listed, then they want all the record sets in a zone
- results = self.list_zone()
-
- if is_old_facts:
- self.results['ansible_facts'] = {
- 'azure_dnsrecordset': self.serialize_list(results)
- }
- self.results['dnsrecordsets'] = self.curated_list(results)
- return self.results
-
- def get_item(self):
- self.log('Get properties for {0}'.format(self.relative_name))
- item = None
- results = []
-
- # try to get information for specific Record Set
- try:
- item = self.dns_client.record_sets.get(self.resource_group, self.zone_name, self.relative_name, self.record_type)
- except CloudError:
- pass
-
- results = [item]
- return results
-
- def list_type(self):
- self.log('Lists the record sets of a specified type in a DNS zone')
- try:
- response = self.dns_client.record_sets.list_by_type(self.resource_group, self.zone_name, self.record_type, top=self.top)
- except AzureHttpError as exc:
- self.fail("Failed to list for record type {0} - {1}".format(self.record_type, str(exc)))
-
- results = []
- for item in response:
- results.append(item)
- return results
-
- def list_zone(self):
- self.log('Lists all record sets in a DNS zone')
- try:
- response = self.dns_client.record_sets.list_by_dns_zone(self.resource_group, self.zone_name, top=self.top)
- except AzureHttpError as exc:
- self.fail("Failed to list for zone {0} - {1}".format(self.zone_name, str(exc)))
-
- results = []
- for item in response:
- results.append(item)
- return results
-
- def serialize_list(self, raws):
- return [self.serialize_obj(item, AZURE_OBJECT_CLASS) for item in raws] if raws else []
-
- def curated_list(self, raws):
- return [self.record_to_dict(item) for item in raws] if raws else []
-
- def record_to_dict(self, record):
- record_type = record.type[len('Microsoft.Network/dnszones/'):]
- records = getattr(record, RECORDSET_VALUE_MAP.get(record_type))
- if not isinstance(records, list):
- records = [records]
- return dict(
- id=record.id,
- relative_name=record.name,
- record_type=record_type,
- records=[x.as_dict() for x in records],
- time_to_live=record.ttl,
- fqdn=record.fqdn,
- provisioning_state=record.provisioning_state
- )
-
-
-def main():
- AzureRMRecordSetInfo()
-
-
-if __name__ == '__main__':
- main()
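Both DNS record set modules above map the SDK's fully qualified RecordSet type to a short record type and a per-type records attribute. A minimal standalone sketch of that mapping (names are illustrative, not part of either module):

RECORD_ATTR = {
    'A': 'arecords', 'AAAA': 'aaaa_records', 'CNAME': 'cname_record',
    'MX': 'mx_records', 'NS': 'ns_records', 'PTR': 'ptr_records',
    'SRV': 'srv_records', 'TXT': 'txt_records', 'SOA': 'soa_record',
    'CAA': 'caa_records',
}

def short_record_type(full_type):
    # "Microsoft.Network/dnszones/A" -> "A". Slicing (rather than str.strip(),
    # which treats its argument as a character set) keeps types such as MX
    # and NS intact.
    prefix = 'Microsoft.Network/dnszones/'
    return full_type[len(prefix):] if full_type.startswith(prefix) else full_type

For example, RECORD_ATTR[short_record_type('Microsoft.Network/dnszones/MX')] yields 'mx_records'.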
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_dnszone.py b/lib/ansible/modules/cloud/azure/azure_rm_dnszone.py
deleted file mode 100644
index 127add0cdd..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_dnszone.py
+++ /dev/null
@@ -1,302 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Obezimnaka Boms, <t-ozboms@microsoft.com>
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_dnszone
-
-version_added: "2.4"
-
-short_description: Manage Azure DNS zones
-
-description:
- - Creates and deletes Azure DNS zones.
-
-options:
- resource_group:
- description:
- - Name of the resource group.
- required: true
- name:
- description:
- - Name of the DNS zone.
- required: true
- state:
- description:
- - Assert the state of the zone. Use C(present) to create or update and C(absent) to delete.
- default: present
- choices:
- - absent
- - present
- type:
- description:
- - The type of this DNS zone (C(public) or C(private)).
- choices:
- - public
- - private
- version_added: 2.8
- registration_virtual_networks:
- description:
- - A list of references to virtual networks that register hostnames in this DNS zone.
- - This is used only when I(type=private).
- - Each element can be the name or resource id, or a dict containing the C(name) and C(resource_group) of the virtual network.
- version_added: 2.8
- type: list
- resolution_virtual_networks:
- description:
- - A list of references to virtual networks that resolve records in this DNS zone.
- - This is used only when I(type=private).
- - Each element can be the name or resource id, or a dict containing the C(name) and C(resource_group) of the virtual network.
- version_added: '2.8'
- type: list
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Obezimnaka Boms (@ozboms)
-'''
-
-EXAMPLES = '''
-
-- name: Create a DNS zone
- azure_rm_dnszone:
- resource_group: myResourceGroup
- name: example.com
-
-- name: Delete a DNS zone
- azure_rm_dnszone:
- resource_group: myResourceGroup
- name: example.com
- state: absent
-
-'''
-
-RETURN = '''
-state:
- description:
- - Current state of the zone.
- returned: always
- type: dict
- sample: {
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup",
- "location": "global",
- "name": "Testing",
- "name_servers": [
- "ns1-07.azure-dns.com.",
- "ns2-07.azure-dns.net.",
- "ns3-07.azure-dns.org.",
- "ns4-07.azure-dns.info."
- ],
- "number_of_record_sets": 2,
- "type": "private",
- "resolution_virtual_networks": ["/subscriptions/XXXX/resourceGroup/myResourceGroup/providers/Microsoft.Network/virtualNetworks/foo"]
- }
-
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
-from ansible.module_utils._text import to_native
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMDNSZone(AzureRMModuleBase):
-
- def __init__(self):
-
- # define user inputs from playbook
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- state=dict(choices=['present', 'absent'], default='present', type='str'),
- type=dict(type='str', choices=['private', 'public']),
- registration_virtual_networks=dict(type='list', elements='raw'),
- resolution_virtual_networks=dict(type='list', elements='raw')
- )
-
- # store the results of the module operation
- self.results = dict(
- changed=False,
- state=dict()
- )
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.tags = None
- self.type = None
- self.registration_virtual_networks = None
- self.resolution_virtual_networks = None
-
- super(AzureRMDNSZone, self).__init__(self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
-
- # create a new zone variable in case the 'try' doesn't find a zone
- zone = None
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- self.registration_virtual_networks = self.preprocess_vn_list(self.registration_virtual_networks)
- self.resolution_virtual_networks = self.preprocess_vn_list(self.resolution_virtual_networks)
-
- self.results['check_mode'] = self.check_mode
-
- # retrieve resource group to make sure it exists
- self.get_resource_group(self.resource_group)
-
- changed = False
- results = dict()
-
- try:
- self.log('Fetching DNS zone {0}'.format(self.name))
- zone = self.dns_client.zones.get(self.resource_group, self.name)
-
- # serialize object into a dictionary
- results = zone_to_dict(zone)
-
- # don't change anything if creating an existing zone, but change if deleting it
- if self.state == 'present':
- changed = False
-
- update_tags, results['tags'] = self.update_tags(results['tags'])
- if update_tags:
- changed = True
- if self.type and results['type'] != self.type:
- changed = True
- results['type'] = self.type
- if self.resolution_virtual_networks:
- if set(self.resolution_virtual_networks) != set(results['resolution_virtual_networks'] or []):
- changed = True
- results['resolution_virtual_networks'] = self.resolution_virtual_networks
- else:
- # this property should not be changed
- self.resolution_virtual_networks = results['resolution_virtual_networks']
- if self.registration_virtual_networks:
- if set(self.registration_virtual_networks) != set(results['registration_virtual_networks'] or []):
- changed = True
- results['registration_virtual_networks'] = self.registration_virtual_networks
- else:
- self.registration_virtual_networks = results['registration_virtual_networks']
- elif self.state == 'absent':
- changed = True
-
- except CloudError:
- # the zone does not exist so create it
- if self.state == 'present':
- changed = True
- else:
- # you can't delete what is not there
- changed = False
-
- self.results['changed'] = changed
- self.results['state'] = results
-
-        # return the results if you're only gathering information
- if self.check_mode:
- return self.results
-
- if changed:
- if self.state == 'present':
- zone = self.dns_models.Zone(zone_type=str.capitalize(self.type) if self.type else None,
- tags=self.tags,
- location='global')
- if self.resolution_virtual_networks:
- zone.resolution_virtual_networks = self.construct_subresource_list(self.resolution_virtual_networks)
- if self.registration_virtual_networks:
- zone.registration_virtual_networks = self.construct_subresource_list(self.registration_virtual_networks)
- self.results['state'] = self.create_or_update_zone(zone)
- elif self.state == 'absent':
- # delete zone
- self.delete_zone()
-                # the delete does not actually return anything. If no exception was raised,
-                # assume it worked.
- self.results['state']['status'] = 'Deleted'
-
- return self.results
-
- def create_or_update_zone(self, zone):
- try:
- # create or update the new Zone object we created
- new_zone = self.dns_client.zones.create_or_update(self.resource_group, self.name, zone)
- except Exception as exc:
- self.fail("Error creating or updating zone {0} - {1}".format(self.name, exc.message or str(exc)))
- return zone_to_dict(new_zone)
-
- def delete_zone(self):
- try:
- # delete the Zone
- poller = self.dns_client.zones.delete(self.resource_group, self.name)
- result = self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error deleting zone {0} - {1}".format(self.name, exc.message or str(exc)))
- return result
-
- def preprocess_vn_list(self, vn_list):
- return [self.parse_vn_id(x) for x in vn_list] if vn_list else None
-
- def parse_vn_id(self, vn):
- vn_dict = self.parse_resource_to_dict(vn) if not isinstance(vn, dict) else vn
- return format_resource_id(val=vn_dict['name'],
- subscription_id=vn_dict.get('subscription') or self.subscription_id,
- namespace='Microsoft.Network',
- types='virtualNetworks',
- resource_group=vn_dict.get('resource_group') or self.resource_group)
-
- def construct_subresource_list(self, raw):
- return [self.dns_models.SubResource(id=x) for x in raw] if raw else None
-
-
-def zone_to_dict(zone):
- # turn Zone object into a dictionary (serialization)
- result = dict(
- id=zone.id,
- name=zone.name,
- number_of_record_sets=zone.number_of_record_sets,
- name_servers=zone.name_servers,
- tags=zone.tags,
- type=zone.zone_type.value.lower(),
- registration_virtual_networks=[to_native(x.id) for x in zone.registration_virtual_networks] if zone.registration_virtual_networks else None,
- resolution_virtual_networks=[to_native(x.id) for x in zone.resolution_virtual_networks] if zone.resolution_virtual_networks else None
- )
- return result
-
-
-def main():
- AzureRMDNSZone()
-
-
-if __name__ == '__main__':
- main()
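
The module above accepts each virtual network reference as a bare name, a full resource id, or a dict with C(name) and C(resource_group), and normalizes it through parse_vn_id() and format_resource_id(). A minimal sketch of that normalization, using a hypothetical helper rather than the module's own code, could look like this:

def normalize_vnet_id(vn, subscription_id, default_resource_group):
    """Return a full Azure resource id for a virtual network reference (illustrative only)."""
    if isinstance(vn, dict):
        name = vn['name']
        resource_group = vn.get('resource_group') or default_resource_group
    elif vn.startswith('/subscriptions/'):
        return vn  # already a full resource id
    else:
        name = vn
        resource_group = default_resource_group
    return ('/subscriptions/{0}/resourceGroups/{1}'
            '/providers/Microsoft.Network/virtualNetworks/{2}').format(
                subscription_id, resource_group, name)

# normalize_vnet_id('foo', '00000000-0000-0000-0000-000000000000', 'myResourceGroup')
# -> '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/foo'
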
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_dnszone_info.py b/lib/ansible/modules/cloud/azure/azure_rm_dnszone_info.py
deleted file mode 100644
index a09e2aa321..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_dnszone_info.py
+++ /dev/null
@@ -1,258 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Obezimnaka Boms, <t-ozboms@microsoft.com>
-#
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_dnszone_info
-
-version_added: "2.9"
-
-short_description: Get DNS zone facts
-
-description:
- - Get facts for a specific DNS zone or all DNS zones within a resource group.
-
-options:
- resource_group:
- description:
- - Limit results by resource group. Required when filtering by name.
- name:
- description:
- - Only show results for a specific zone.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Obezimnaka Boms (@ozboms)
-
-'''
-
-EXAMPLES = '''
-- name: Get facts for one zone
- azure_rm_dnszone_info:
- resource_group: myResourceGroup
- name: foobar22
-
-- name: Get facts for all zones in a resource group
- azure_rm_dnszone_info:
- resource_group: myResourceGroup
-
-- name: Get facts by tags
- azure_rm_dnszone_info:
- tags:
- - testing
-'''
-
-RETURN = '''
-azure_dnszones:
- description:
- - List of zone dicts.
- returned: always
- type: list
- example: [{
- "etag": "00000002-0000-0000-0dcb-df5776efd201",
- "location": "global",
- "properties": {
- "maxNumberOfRecordSets": 5000,
- "numberOfRecordSets": 15
- },
- "tags": {}
- }]
-dnszones:
- description:
- - List of zone dicts, which share the same layout as azure_rm_dnszone module parameter.
- returned: always
- type: list
- contains:
- id:
- description:
- - id of the DNS Zone.
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/dnszones/azure.com"
- name:
- description:
- - name of the DNS zone.
- sample: azure.com
- type:
- description:
- - The type of this DNS zone (C(public) or C(private)).
- sample: private
- registration_virtual_networks:
- description:
- - A list of references to virtual networks that register hostnames in this DNS zone.
- type: list
- sample: ["/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/bar"]
- resolution_virtual_networks:
- description:
- - A list of references to virtual networks that resolve records in this DNS zone.
- type: list
- sample: ["/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/deadbeef"]
- number_of_record_sets:
- description:
- - The current number of record sets in this DNS zone.
- type: int
- sample: 2
- max_number_of_record_sets:
- description:
- - The maximum number of record sets that can be created in this DNS zone.
- type: int
- sample: 5000
- name_servers:
- description:
- - The name servers for this DNS zone.
- type: list
- sample: [
- "ns1-03.azure-dns.com.",
- "ns2-03.azure-dns.net.",
- "ns3-03.azure-dns.org.",
- "ns4-03.azure-dns.info."
- ]
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils._text import to_native
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.common import AzureMissingResourceHttpError, AzureHttpError
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-AZURE_OBJECT_CLASS = 'DnsZone'
-
-
-class AzureRMDNSZoneInfo(AzureRMModuleBase):
-
- def __init__(self):
-
- # define user inputs into argument
- self.module_arg_spec = dict(
- name=dict(type='str'),
- resource_group=dict(type='str'),
- tags=dict(type='list')
- )
-
- # store the results of the module operation
- self.results = dict(
- changed=False,
- ansible_info=dict(azure_dnszones=[])
- )
-
- self.name = None
- self.resource_group = None
- self.tags = None
-
- super(AzureRMDNSZoneInfo, self).__init__(self.module_arg_spec)
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_dnszone_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_dnszone_facts' module has been renamed to 'azure_rm_dnszone_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name and not self.resource_group:
- self.fail("Parameter error: resource group required when filtering by name.")
-
- results = []
- # list the conditions and what to return based on user input
- if self.name is not None:
- # if there is a name, facts about that specific zone
- results = self.get_item()
- elif self.resource_group:
- # all the zones listed in that specific resource group
- results = self.list_resource_group()
- else:
- # all the zones in a subscription
- results = self.list_items()
-
- self.results['ansible_info']['azure_dnszones'] = self.serialize_items(results)
- self.results['dnszones'] = self.curated_items(results)
-
- return self.results
-
- def get_item(self):
- self.log('Get properties for {0}'.format(self.name))
- item = None
- results = []
- # get specific zone
- try:
- item = self.dns_client.zones.get(self.resource_group, self.name)
- except CloudError:
- pass
-
- # serialize result
- if item and self.has_tags(item.tags, self.tags):
- results = [item]
- return results
-
- def list_resource_group(self):
- self.log('List items for resource group')
- try:
- response = self.dns_client.zones.list_by_resource_group(self.resource_group)
- except AzureHttpError as exc:
- self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(item)
- return results
-
- def list_items(self):
- self.log('List all items')
- try:
- response = self.dns_client.zones.list()
- except AzureHttpError as exc:
- self.fail("Failed to list all items - {0}".format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(item)
- return results
-
- def serialize_items(self, raws):
- return [self.serialize_obj(item, AZURE_OBJECT_CLASS) for item in raws] if raws else []
-
- def curated_items(self, raws):
- return [self.zone_to_dict(item) for item in raws] if raws else []
-
- def zone_to_dict(self, zone):
- return dict(
- id=zone.id,
- name=zone.name,
- number_of_record_sets=zone.number_of_record_sets,
- max_number_of_record_sets=zone.max_number_of_record_sets,
- name_servers=zone.name_servers,
- tags=zone.tags,
- type=zone.zone_type.value.lower(),
- registration_virtual_networks=[to_native(x.id) for x in zone.registration_virtual_networks] if zone.registration_virtual_networks else None,
- resolution_virtual_networks=[to_native(x.id) for x in zone.resolution_virtual_networks] if zone.resolution_virtual_networks else None
- )
-
-
-def main():
- AzureRMDNSZoneInfo()
-
-
-if __name__ == '__main__':
- main()
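
The info module above filters items with has_tags(), where each filter entry is either 'key' or 'key:value' and every entry must match. A stand-alone sketch of those semantics (a hypothetical helper, not the AzureRMModuleBase implementation) might be:

def matches_tag_filter(item_tags, tag_filter):
    """Return True if every 'key' or 'key:value' entry matches item_tags (illustrative only)."""
    item_tags = item_tags or {}
    for entry in (tag_filter or []):
        key, _, value = entry.partition(':')
        if key not in item_tags:
            return False
        if value and str(item_tags[key]) != value:
            return False
    return True

# matches_tag_filter({'env': 'testing'}, ['env'])       -> True
# matches_tag_filter({'env': 'testing'}, ['env:prod'])  -> False
# matches_tag_filter({}, None)                          -> True
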
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_functionapp.py b/lib/ansible/modules/cloud/azure/azure_rm_functionapp.py
deleted file mode 100644
index 0c372a88de..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_functionapp.py
+++ /dev/null
@@ -1,421 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Thomas Stringer <tomstr@microsoft.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_functionapp
-version_added: "2.4"
-short_description: Manage Azure Function Apps
-description:
- - Create, update or delete an Azure Function App.
-options:
- resource_group:
- description:
- - Name of resource group.
- required: true
- aliases:
- - resource_group_name
- name:
- description:
- - Name of the Azure Function App.
- required: true
- location:
- description:
- - Valid Azure location. Defaults to location of the resource group.
- plan:
- description:
- - App service plan.
-            - It can be the name of an existing app service plan in the same resource group as the function app.
-            - It can be the resource id of an existing app service plan.
-            - For example, /subscriptions/<subs_id>/resourceGroups/<resource_group>/providers/Microsoft.Web/serverFarms/<plan_name>.
-            - It can be a dict containing C(name) and C(resource_group).
-            - C(name). Name of the app service plan.
-            - C(resource_group). Resource group name of the app service plan.
- version_added: "2.8"
- container_settings:
- description: Web app container settings.
- suboptions:
- name:
- description:
- - Name of container. For example "imagename:tag".
- registry_server_url:
- description:
- - Container registry server url. For example C(mydockerregistry.io).
- registry_server_user:
- description:
- - The container registry server user name.
- registry_server_password:
- description:
- - The container registry server password.
- version_added: "2.8"
- storage_account:
- description:
- - Name of the storage account to use.
- required: true
- aliases:
- - storage
- - storage_account_name
- app_settings:
- description:
- - Dictionary containing application settings.
- state:
- description:
- - Assert the state of the Function App. Use C(present) to create or update a Function App and C(absent) to delete.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Thomas Stringer (@trstringer)
-'''
-
-EXAMPLES = '''
-- name: Create a function app
- azure_rm_functionapp:
- resource_group: myResourceGroup
- name: myFunctionApp
- storage_account: myStorageAccount
-
-- name: Create a function app with app settings
- azure_rm_functionapp:
- resource_group: myResourceGroup
- name: myFunctionApp
- storage_account: myStorageAccount
- app_settings:
- setting1: value1
- setting2: value2
-
-- name: Create container based function app
- azure_rm_functionapp:
- resource_group: myResourceGroup
- name: myFunctionApp
- storage_account: myStorageAccount
- plan:
- resource_group: myResourceGroup
- name: myAppPlan
- container_settings:
- name: httpd
- registry_server_url: index.docker.io
-
-- name: Delete a function app
- azure_rm_functionapp:
- resource_group: myResourceGroup
- name: myFunctionApp
- state: absent
-'''
-
-RETURN = '''
-state:
- description:
- - Current state of the Azure Function App.
- returned: success
- type: dict
- example:
- id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myFunctionApp
- name: myfunctionapp
- kind: functionapp
- location: East US
- type: Microsoft.Web/sites
- state: Running
- host_names:
- - myfunctionapp.azurewebsites.net
- repository_site_name: myfunctionapp
- usage_state: Normal
- enabled: true
- enabled_host_names:
- - myfunctionapp.azurewebsites.net
- - myfunctionapp.scm.azurewebsites.net
- availability_state: Normal
- host_name_ssl_states:
- - name: myfunctionapp.azurewebsites.net
- ssl_state: Disabled
- host_type: Standard
- - name: myfunctionapp.scm.azurewebsites.net
- ssl_state: Disabled
- host_type: Repository
- server_farm_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/EastUSPlan
- reserved: false
- last_modified_time_utc: 2017-08-22T18:54:01.190Z
- scm_site_also_stopped: false
- client_affinity_enabled: true
- client_cert_enabled: false
- host_names_disabled: false
- outbound_ip_addresses: ............
- container_size: 1536
- daily_memory_time_quota: 0
- resource_group: myResourceGroup
- default_host_name: myfunctionapp.azurewebsites.net
-''' # NOQA
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.web.models import (
- site_config, app_service_plan, Site, SiteConfig, NameValuePair, SiteSourceControl,
- AppServicePlan, SkuDescription
- )
- from azure.mgmt.resource.resources import ResourceManagementClient
- from msrest.polling import LROPoller
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-container_settings_spec = dict(
- name=dict(type='str', required=True),
- registry_server_url=dict(type='str'),
- registry_server_user=dict(type='str'),
- registry_server_password=dict(type='str', no_log=True)
-)
-
-
-class AzureRMFunctionApp(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True, aliases=['resource_group_name']),
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- location=dict(type='str'),
- storage_account=dict(
- type='str',
- aliases=['storage', 'storage_account_name']
- ),
- app_settings=dict(type='dict'),
- plan=dict(
- type='raw'
- ),
- container_settings=dict(
- type='dict',
- options=container_settings_spec
- )
- )
-
- self.results = dict(
- changed=False,
- state=dict()
- )
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.location = None
- self.storage_account = None
- self.app_settings = None
- self.plan = None
- self.container_settings = None
-
- required_if = [('state', 'present', ['storage_account'])]
-
- super(AzureRMFunctionApp, self).__init__(
- self.module_arg_spec,
- supports_check_mode=True,
- required_if=required_if
- )
-
- def exec_module(self, **kwargs):
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- if self.app_settings is None:
- self.app_settings = dict()
-
- try:
- resource_group = self.rm_client.resource_groups.get(self.resource_group)
- except CloudError:
- self.fail('Unable to retrieve resource group')
-
- self.location = self.location or resource_group.location
-
- try:
- function_app = self.web_client.web_apps.get(
- resource_group_name=self.resource_group,
- name=self.name
- )
- # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
- exists = function_app is not None
- except CloudError as exc:
- exists = False
-
- if self.state == 'absent':
- if exists:
- if self.check_mode:
- self.results['changed'] = True
- return self.results
- try:
- self.web_client.web_apps.delete(
- resource_group_name=self.resource_group,
- name=self.name
- )
- self.results['changed'] = True
- except CloudError as exc:
- self.fail('Failure while deleting web app: {0}'.format(exc))
- else:
- self.results['changed'] = False
- else:
- kind = 'functionapp'
- linux_fx_version = None
- if self.container_settings and self.container_settings.get('name'):
- kind = 'functionapp,linux,container'
- linux_fx_version = 'DOCKER|'
- if self.container_settings.get('registry_server_url'):
- self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url']
- linux_fx_version += self.container_settings['registry_server_url'] + '/'
- linux_fx_version += self.container_settings['name']
- if self.container_settings.get('registry_server_user'):
- self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings.get('registry_server_user')
-
- if self.container_settings.get('registry_server_password'):
- self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings.get('registry_server_password')
-
- if not self.plan and function_app:
- self.plan = function_app.server_farm_id
-
- if not exists:
- function_app = Site(
- location=self.location,
- kind=kind,
- site_config=SiteConfig(
- app_settings=self.aggregated_app_settings(),
- scm_type='LocalGit'
- )
- )
- self.results['changed'] = True
- else:
- self.results['changed'], function_app = self.update(function_app)
-
- # get app service plan
- if self.plan:
- if isinstance(self.plan, dict):
- self.plan = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Web/serverfarms/{2}".format(
- self.subscription_id,
- self.plan.get('resource_group', self.resource_group),
- self.plan.get('name')
- )
- function_app.server_farm_id = self.plan
-
- # set linux fx version
- if linux_fx_version:
- function_app.site_config.linux_fx_version = linux_fx_version
-
- if self.check_mode:
- self.results['state'] = function_app.as_dict()
- elif self.results['changed']:
- try:
- new_function_app = self.web_client.web_apps.create_or_update(
- resource_group_name=self.resource_group,
- name=self.name,
- site_envelope=function_app
- ).result()
- self.results['state'] = new_function_app.as_dict()
- except CloudError as exc:
- self.fail('Error creating or updating web app: {0}'.format(exc))
-
- return self.results
-
- def update(self, source_function_app):
- """Update the Site object if there are any changes"""
-
- source_app_settings = self.web_client.web_apps.list_application_settings(
- resource_group_name=self.resource_group,
- name=self.name
- )
-
- changed, target_app_settings = self.update_app_settings(source_app_settings.properties)
-
- source_function_app.site_config = SiteConfig(
- app_settings=target_app_settings,
- scm_type='LocalGit'
- )
-
- return changed, source_function_app
-
- def update_app_settings(self, source_app_settings):
- """Update app settings"""
-
- target_app_settings = self.aggregated_app_settings()
- target_app_settings_dict = dict([(i.name, i.value) for i in target_app_settings])
- return target_app_settings_dict != source_app_settings, target_app_settings
-
- def necessary_functionapp_settings(self):
- """Construct the necessary app settings required for an Azure Function App"""
-
- function_app_settings = []
-
- if self.container_settings is None:
- for key in ['AzureWebJobsStorage', 'WEBSITE_CONTENTAZUREFILECONNECTIONSTRING', 'AzureWebJobsDashboard']:
- function_app_settings.append(NameValuePair(name=key, value=self.storage_connection_string))
- function_app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~1'))
- function_app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION', value='6.5.0'))
- function_app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=self.name))
- else:
- function_app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
- function_app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE', value=False))
- function_app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=self.storage_connection_string))
-
- return function_app_settings
-
- def aggregated_app_settings(self):
- """Combine both system and user app settings"""
-
- function_app_settings = self.necessary_functionapp_settings()
- for app_setting_key in self.app_settings:
- found_setting = None
- for s in function_app_settings:
- if s.name == app_setting_key:
- found_setting = s
- break
- if found_setting:
- found_setting.value = self.app_settings[app_setting_key]
- else:
- function_app_settings.append(NameValuePair(
- name=app_setting_key,
- value=self.app_settings[app_setting_key]
- ))
- return function_app_settings
-
- @property
- def storage_connection_string(self):
- """Construct the storage account connection string"""
-
- return 'DefaultEndpointsProtocol=https;AccountName={0};AccountKey={1}'.format(
- self.storage_account,
- self.storage_key
- )
-
- @property
- def storage_key(self):
- """Retrieve the storage account key"""
-
- return self.storage_client.storage_accounts.list_keys(
- resource_group_name=self.resource_group,
- account_name=self.storage_account
- ).keys[0].value
-
-
-def main():
- """Main function execution"""
-
- AzureRMFunctionApp()
-
-
-if __name__ == '__main__':
- main()
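
In the container branch above, the site's linux_fx_version is assembled as 'DOCKER|<registry>/<image>' from container_settings. A small sketch of that assembly (a hypothetical helper mirroring the inline logic) could be:

def build_linux_fx_version(container_settings):
    """Build the DOCKER linux_fx_version string from container settings (illustrative only)."""
    value = 'DOCKER|'
    if container_settings.get('registry_server_url'):
        value += container_settings['registry_server_url'] + '/'
    value += container_settings['name']
    return value

# build_linux_fx_version({'name': 'httpd', 'registry_server_url': 'index.docker.io'})
# -> 'DOCKER|index.docker.io/httpd'
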
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_functionapp_info.py b/lib/ansible/modules/cloud/azure/azure_rm_functionapp_info.py
deleted file mode 100644
index 0cd5b6f60b..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_functionapp_info.py
+++ /dev/null
@@ -1,206 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Thomas Stringer, <tomstr@microsoft.com>
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_functionapp_info
-version_added: "2.9"
-short_description: Get Azure Function App facts
-description:
- - Get facts for one Azure Function App or all Function Apps within a resource group.
-options:
- name:
- description:
- - Only show results for a specific Function App.
- resource_group:
- description:
- - Limit results to a resource group. Required when filtering by name.
- aliases:
- - resource_group_name
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Thomas Stringer (@trstringer)
-'''
-
-EXAMPLES = '''
- - name: Get facts for one Function App
- azure_rm_functionapp_info:
- resource_group: myResourceGroup
- name: myfunctionapp
-
- - name: Get facts for all Function Apps in a resource group
- azure_rm_functionapp_info:
- resource_group: myResourceGroup
-
- - name: Get facts for all Function Apps by tags
- azure_rm_functionapp_info:
- tags:
- - testing
-'''
-
-RETURN = '''
-azure_functionapps:
- description:
- - List of Azure Function Apps dicts.
- returned: always
- type: list
- example:
- id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/sites/myfunctionapp
- name: myfunctionapp
- kind: functionapp
- location: East US
- type: Microsoft.Web/sites
- state: Running
- host_names:
- - myfunctionapp.azurewebsites.net
- repository_site_name: myfunctionapp
- usage_state: Normal
- enabled: true
- enabled_host_names:
- - myfunctionapp.azurewebsites.net
- - myfunctionapp.scm.azurewebsites.net
- availability_state: Normal
- host_name_ssl_states:
- - name: myfunctionapp.azurewebsites.net
- ssl_state: Disabled
- host_type: Standard
- - name: myfunctionapp.scm.azurewebsites.net
- ssl_state: Disabled
- host_type: Repository
- server_farm_id: /subscriptions/.../resourceGroups/ansible-rg/providers/Microsoft.Web/serverfarms/EastUSPlan
- reserved: false
- last_modified_time_utc: 2017-08-22T18:54:01.190Z
- scm_site_also_stopped: false
- client_affinity_enabled: true
- client_cert_enabled: false
- host_names_disabled: false
- outbound_ip_addresses: ............
- container_size: 1536
- daily_memory_time_quota: 0
- resource_group: myResourceGroup
- default_host_name: myfunctionapp.azurewebsites.net
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-
-class AzureRMFunctionAppInfo(AzureRMModuleBase):
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str'),
- resource_group=dict(type='str', aliases=['resource_group_name']),
- tags=dict(type='list'),
- )
-
- self.results = dict(
- changed=False,
- ansible_info=dict(azure_functionapps=[])
- )
-
- self.name = None
- self.resource_group = None
- self.tags = None
-
- super(AzureRMFunctionAppInfo, self).__init__(
- self.module_arg_spec,
- supports_tags=False,
- facts_module=True
- )
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_functionapp_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_functionapp_facts' module has been renamed to 'azure_rm_functionapp_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name and not self.resource_group:
- self.fail("Parameter error: resource group required when filtering by name.")
-
- if self.name:
- self.results['ansible_info']['azure_functionapps'] = self.get_functionapp()
- elif self.resource_group:
- self.results['ansible_info']['azure_functionapps'] = self.list_resource_group()
- else:
- self.results['ansible_info']['azure_functionapps'] = self.list_all()
-
- return self.results
-
- def get_functionapp(self):
- self.log('Get properties for Function App {0}'.format(self.name))
- function_app = None
- result = []
-
- try:
- function_app = self.web_client.web_apps.get(
- self.resource_group,
- self.name
- )
- except CloudError:
- pass
-
- if function_app and self.has_tags(function_app.tags, self.tags):
- result = function_app.as_dict()
-
- return [result]
-
- def list_resource_group(self):
- self.log('List items')
- try:
- response = self.web_client.web_apps.list_by_resource_group(self.resource_group)
- except Exception as exc:
- self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(item.as_dict())
- return results
-
- def list_all(self):
- self.log('List all items')
- try:
-            response = self.web_client.web_apps.list()
- except Exception as exc:
- self.fail("Error listing all items - {0}".format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(item.as_dict())
- return results
-
-
-def main():
- AzureRMFunctionAppInfo()
-
-
-if __name__ == '__main__':
- main()
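
Both info modules above follow the same dispatch rule: filtering by name requires a resource group, a name selects a single item, a resource group alone lists that group, and neither lists the whole subscription. A compact sketch of that decision (hypothetical helper) is:

def select_scope(name, resource_group):
    """Return which query a facts module should run (illustrative only)."""
    if name and not resource_group:
        raise ValueError('resource group required when filtering by name')
    if name:
        return 'get_one'
    if resource_group:
        return 'list_resource_group'
    return 'list_all'

# select_scope('myfunctionapp', 'myResourceGroup') -> 'get_one'
# select_scope(None, 'myResourceGroup')            -> 'list_resource_group'
# select_scope(None, None)                         -> 'list_all'
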
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_gallery.py b/lib/ansible/modules/cloud/azure/azure_rm_gallery.py
deleted file mode 100644
index 3fee6dfdb9..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_gallery.py
+++ /dev/null
@@ -1,308 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_gallery
-version_added: '2.9'
-short_description: Manage Azure Shared Image Gallery instance
-description:
- - Create, update and delete instance of Azure Shared Image Gallery (SIG).
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: true
- type: str
- name:
- description:
- - The name of the Shared Image Gallery.
-            - Valid names consist of fewer than 80 characters made up of alphanumerics, underscores, and periods.
- required: true
- type: str
- location:
- description:
- - Resource location.
- type: str
- description:
- description:
- - The description of this Shared Image Gallery resource. This property is updatable.
- type: str
- state:
- description:
- - Assert the state of the Gallery.
-            - Use C(present) to create or update a Gallery and C(absent) to delete it.
- default: present
- type: str
- choices:
- - absent
- - present
-extends_documentation_fragment:
- - azure
- - azure_tags
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
-- name: Create or update a simple gallery.
- azure_rm_gallery:
- resource_group: myResourceGroup
- name: myGallery1283
- location: West US
- description: This is the gallery description.
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGallery1283"
-'''
-
-import time
-import json
-import re
-from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
-from ansible.module_utils.azure_rm_common_rest import GenericRestClient
-from copy import deepcopy
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMGalleries(AzureRMModuleBaseExt):
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- updatable=False,
- disposition='resourceGroupName',
- required=True
- ),
- name=dict(
- type='str',
- updatable=False,
- disposition='galleryName',
- required=True
- ),
- location=dict(
- type='str',
- updatable=False,
- disposition='/'
- ),
- description=dict(
- type='str',
- disposition='/properties/*'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.name = None
- self.gallery = None
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.url = None
- self.status_code = [200, 201, 202]
- self.to_do = Actions.NoAction
-
- self.body = {}
- self.query_parameters = {}
- self.query_parameters['api-version'] = '2019-07-01'
- self.header_parameters = {}
- self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
-
- super(AzureRMGalleries, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- self.body[key] = kwargs[key]
-
- self.inflate_parameters(self.module_arg_spec, self.body, 0)
-
- old_response = None
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- resource_group = self.get_resource_group(self.resource_group)
-
- if 'location' not in self.body:
- self.body['location'] = resource_group.location
-
- self.url = ('/subscriptions' +
- '/{{ subscription_id }}' +
- '/resourceGroups' +
- '/{{ resource_group }}' +
- '/providers' +
- '/Microsoft.Compute' +
- '/galleries' +
- '/{{ gallery_name }}')
- self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
- self.url = self.url.replace('{{ resource_group }}', self.resource_group)
- self.url = self.url.replace('{{ gallery_name }}', self.name)
-
- old_response = self.get_resource()
-
- if not old_response:
- self.log("Gallery instance doesn't exist")
-
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log('Gallery instance already exists')
-
- if self.state == 'absent':
- self.to_do = Actions.Delete
- else:
- modifiers = {}
- self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
- self.results['modifiers'] = modifiers
- self.results['compare'] = []
- if not self.default_compare(modifiers, self.body, old_response, '', self.results):
- self.to_do = Actions.Update
- self.body['properties'].pop('identifier', None)
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log('Need to Create / Update the Gallery instance')
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_resource()
-
- # if not old_response:
- self.results['changed'] = True
- # else:
- # self.results['changed'] = old_response.__ne__(response)
- self.log('Creation / Update done')
- elif self.to_do == Actions.Delete:
- self.log('Gallery instance deleted')
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_resource()
-
-                # make sure the instance is actually deleted; for some Azure resources the instance
-                # hangs around for some time after deletion -- this should really be fixed in Azure
- while self.get_resource():
- time.sleep(20)
- else:
- self.log('Gallery instance unchanged')
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
-
- return self.results
-
- def create_update_resource(self):
- # self.log('Creating / Updating the Gallery instance {0}'.format(self.))
-
- try:
- response = self.mgmt_client.query(self.url,
- 'PUT',
- self.query_parameters,
- self.header_parameters,
- self.body,
- self.status_code,
- 600,
- 30)
- except CloudError as exc:
- self.log('Error attempting to create the Gallery instance.')
- self.fail('Error creating the Gallery instance: {0}'.format(str(exc)))
-
- try:
- response = json.loads(response.text)
- except Exception:
- response = {'text': response.text}
-
- return response
-
- def delete_resource(self):
- # self.log('Deleting the Gallery instance {0}'.format(self.))
- try:
- response = self.mgmt_client.query(self.url,
- 'DELETE',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- except CloudError as e:
- self.log('Error attempting to delete the Gallery instance.')
- self.fail('Error deleting the Gallery instance: {0}'.format(str(e)))
-
- return True
-
- def get_resource(self):
- # self.log('Checking if the Gallery instance {0} is present'.format(self.))
- found = False
- try:
- response = self.mgmt_client.query(self.url,
- 'GET',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- response = json.loads(response.text)
- found = True
- self.log("Response : {0}".format(response))
- # self.log("AzureFirewall instance : {0} found".format(response.name))
- except CloudError as e:
-            self.log('Did not find the Gallery instance.')
- if found is True:
- return response
-
- return False
-
-
-def main():
- AzureRMGalleries()
-
-
-if __name__ == '__main__':
- main()
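
The REST-based module above builds its request URL by substituting placeholders into a template string. A stand-alone sketch of that step (a hypothetical helper, values shown are examples) could be:

def gallery_url(subscription_id, resource_group, gallery_name):
    """Expand the galleries URL template the way the module does (illustrative only)."""
    url = ('/subscriptions/{{ subscription_id }}'
           '/resourceGroups/{{ resource_group }}'
           '/providers/Microsoft.Compute/galleries/{{ gallery_name }}')
    url = url.replace('{{ subscription_id }}', subscription_id)
    url = url.replace('{{ resource_group }}', resource_group)
    url = url.replace('{{ gallery_name }}', gallery_name)
    return url

# gallery_url('00000000-0000-0000-0000-000000000000', 'myResourceGroup', 'myGallery1283')
# -> '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGallery1283'
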
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_gallery_info.py b/lib/ansible/modules/cloud/azure/azure_rm_gallery_info.py
deleted file mode 100644
index 98e44674f2..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_gallery_info.py
+++ /dev/null
@@ -1,263 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Liu Qingyi, (@smile37773)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_gallery_info
-version_added: '2.9'
-short_description: Get Azure Shared Image Gallery info
-description:
- - Get info of Azure Shared Image Gallery.
-options:
- resource_group:
- description:
- - The name of the resource group.
- type: str
- name:
- description:
-            - Resource name.
- type: str
-extends_documentation_fragment:
- - azure
-author:
- - Liu Qingyi (@smile37773)
-
-'''
-
-EXAMPLES = '''
-- name: List galleries in a subscription.
- azure_rm_gallery_info:
-- name: List galleries in a resource group.
- azure_rm_gallery_info:
- resource_group: myResourceGroup
-- name: Get a gallery.
- azure_rm_gallery_info:
- resource_group: myResourceGroup
- name: myGallery
-
-'''
-
-RETURN = '''
-galleries:
- description:
- - A list of dict results where the key is the name of the gallery and the values are the info for that gallery.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGallery"
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: "myGallery"
- location:
- description:
- - Resource location.
- returned: always
- type: str
- sample: "eastus"
- tags:
- description:
- - Resource tags.
- returned: always
- type: dict
- sample: { "tag": "value" }
- description:
- description:
- - This is the gallery description.
- returned: always
- type: str
- sample: "This is the gallery description."
- provisioning_state:
- description:
- - The current state of the gallery.
- returned: always
- type: str
- sample: "Succeeded"
-
-'''
-
-import time
-import json
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.azure_rm_common_rest import GenericRestClient
-from copy import deepcopy
-try:
- from msrestazure.azure_exceptions import CloudError
-except Exception:
- # handled in azure_rm_common
- pass
-
-
-class AzureRMGalleriesInfo(AzureRMModuleBase):
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str'
- ),
- name=dict(
- type='str'
- )
- )
-
- self.resource_group = None
- self.name = None
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.url = None
- self.status_code = [200]
-
- self.query_parameters = {}
- self.query_parameters['api-version'] = '2019-03-01'
- self.header_parameters = {}
- self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
-
- self.mgmt_client = None
- super(AzureRMGalleriesInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if (self.resource_group is not None and self.name is not None):
- # self.results['galleries'] = self.format_item(self.get())
- self.results['galleries'] = self.get()
- elif (self.resource_group is not None):
- # self.results['galleries'] = self.format_item(self.listbyresourcegroup())
- self.results['galleries'] = self.listbyresourcegroup()
- else:
- # self.results['galleries'] = [self.format_item(self.list())]
- self.results['galleries'] = self.list()
- return self.results
-
- def get(self):
- response = None
- results = {}
- # prepare url
- self.url = ('/subscriptions' +
- '/{{ subscription_id }}' +
- '/resourceGroups' +
- '/{{ resource_group }}' +
- '/providers' +
- '/Microsoft.Compute' +
- '/galleries' +
- '/{{ gallery_name }}')
- self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
- self.url = self.url.replace('{{ resource_group }}', self.resource_group)
- self.url = self.url.replace('{{ gallery_name }}', self.name)
-
- try:
- response = self.mgmt_client.query(self.url,
- 'GET',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- results = json.loads(response.text)
- # self.log('Response : {0}'.format(response))
- except CloudError as e:
-            self.log('Could not retrieve the gallery info.')
-
- return self.format_item(results)
-
- def listbyresourcegroup(self):
- response = None
- results = {}
- # prepare url
- self.url = ('/subscriptions' +
- '/{{ subscription_id }}' +
- '/resourceGroups' +
- '/{{ resource_group }}' +
- '/providers' +
- '/Microsoft.Compute' +
- '/galleries')
- self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
- self.url = self.url.replace('{{ resource_group }}', self.resource_group)
-
- try:
- response = self.mgmt_client.query(self.url,
- 'GET',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- results = json.loads(response.text)
- # self.log('Response : {0}'.format(response))
- except CloudError as e:
-            self.log('Could not list galleries in the resource group.')
-
-        return [self.format_item(x) for x in results['value']] if results.get('value') else []
-
- def list(self):
- response = None
- results = {}
- # prepare url
- self.url = ('/subscriptions' +
- '/{{ subscription_id }}' +
- '/providers' +
- '/Microsoft.Compute' +
- '/galleries')
- self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
-
- try:
- response = self.mgmt_client.query(self.url,
- 'GET',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- results = json.loads(response.text)
- # self.log('Response : {0}'.format(response))
- except CloudError as e:
-            self.log('Could not list galleries in the subscription.')
-
-        return [self.format_item(x) for x in results['value']] if results.get('value') else []
-
- def format_item(self, item):
- d = {
- 'id': item['id'],
- 'name': item['name'],
- 'location': item['location'],
- 'tags': item.get('tags'),
- 'description': item['properties']['description'],
- 'provisioning_state': item['properties']['provisioningState']
- }
- return d
-
-
-def main():
- AzureRMGalleriesInfo()
-
-
-if __name__ == '__main__':
- main()
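
format_item() above flattens the raw REST payload into the curated dict the module returns. Running a hypothetical payload through the same mapping illustrates the shape:

raw = {
    'id': '/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup'
          '/providers/Microsoft.Compute/galleries/myGallery',
    'name': 'myGallery',
    'location': 'eastus',
    'tags': {'tag': 'value'},
    'properties': {'description': 'This is the gallery description.',
                   'provisioningState': 'Succeeded'},
}

curated = dict(
    id=raw['id'],
    name=raw['name'],
    location=raw['location'],
    tags=raw.get('tags'),
    description=raw['properties']['description'],
    provisioning_state=raw['properties']['provisioningState'],
)
# curated['provisioning_state'] -> 'Succeeded'
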
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_galleryimage.py b/lib/ansible/modules/cloud/azure/azure_rm_galleryimage.py
deleted file mode 100644
index 6ef017c5ee..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_galleryimage.py
+++ /dev/null
@@ -1,544 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_galleryimage
-version_added: '2.9'
-short_description: Manage Azure SIG Image instance
-description:
- - Create, update and delete instance of Azure SIG Image.
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: true
- type: str
- gallery_name:
- description:
- - The name of the Shared Image Gallery in which the Image Definition is to be created.
- required: true
- type: str
- name:
- description:
- - The name of the gallery Image Definition to be created or updated.
-            - The allowed characters are letters and numbers, with dots and dashes allowed in the middle.
- - The maximum length is 80 characters.
- required: true
- type: str
- location:
- description:
- - Resource location.
- type: str
- description:
- description:
- - The description of this gallery Image Definition resource. This property is updatable.
- type: str
- eula:
- description:
- - The Eula agreement for the gallery Image Definition.
- type: str
- privacy_statement_uri:
- description:
- - The privacy statement uri.
- type: str
- release_note_uri:
- description:
- - The release note uri.
- type: str
- os_type:
- description:
- - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image.
- choices:
- - windows
- - linux
- required: true
- type: str
- os_state:
- description:
-            - The allowed values for OS State are C(generalized) and C(specialized).
- choices:
- - generalized
- - specialized
- required: true
- type: str
- end_of_life_date:
- description:
- - The end of life date of the gallery Image Definition.
- - This property can be used for decommissioning purposes.
- - This property is updatable.
- - Format should be according to ISO-8601, for instance "2019-06-26".
- type: str
- identifier:
- description:
- - Image identifier.
- required: true
- type: dict
- suboptions:
- publisher:
- description:
- - The name of the gallery Image Definition publisher.
- required: true
- type: str
- offer:
- description:
- - The name of the gallery Image Definition offer.
- required: true
- type: str
- sku:
- description:
- - The name of the gallery Image Definition SKU.
- required: true
- type: str
- recommended:
- description:
- - Recommended parameter values.
- type: dict
- suboptions:
- v_cpus:
- description:
- - Number of virtual CPUs.
- type: dict
- suboptions:
- min:
- description:
- - The minimum number of the resource.
- type: int
- max:
- description:
- - The maximum number of the resource.
- type: int
- memory:
- description:
- - Memory.
- type: dict
- suboptions:
- min:
- description:
- - The minimum number of the resource.
- type: int
- max:
- description:
- - The maximum number of the resource.
- type: int
- disallowed:
- description:
- - Disallowed parameter values.
- type: dict
- suboptions:
- disk_types:
- description:
- - A list of disallowed disk types.
- type: list
- purchase_plan:
- description:
- - Purchase plan.
- type: dict
- suboptions:
- name:
- description:
- - The plan ID.
- type: str
- publisher:
- description:
- - The publisher ID.
- type: str
- product:
- description:
- - The product ID.
- type: str
- state:
- description:
- - Assert the state of the GalleryImage.
-            - Use C(present) to create or update a GalleryImage and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
- type: str
-extends_documentation_fragment:
- - azure
- - azure_tags
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
-- name: Create or update gallery image
- azure_rm_galleryimage:
- resource_group: myResourceGroup
- gallery_name: myGallery1283
- name: myImage
- location: West US
- os_type: linux
- os_state: generalized
- identifier:
- publisher: myPublisherName
- offer: myOfferName
- sku: mySkuName
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
-    sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGallery1283/images/myImage"
-'''
-
-import time
-import json
-import re
-from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
-from ansible.module_utils.azure_rm_common_rest import GenericRestClient
-from copy import deepcopy
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMGalleryImages(AzureRMModuleBaseExt):
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- updatable=False,
- disposition='resourceGroupName',
- required=True
- ),
- gallery_name=dict(
- type='str',
- updatable=False,
- disposition='galleryName',
- required=True
- ),
- name=dict(
- type='str',
- updatable=False,
- disposition='galleryImageName',
- required=True
- ),
- location=dict(
- type='str',
- updatable=False,
- disposition='/'
- ),
- description=dict(
- type='str',
- disposition='/properties/*'
- ),
- eula=dict(
- type='str',
- disposition='/properties/*'
- ),
- privacy_statement_uri=dict(
- type='str',
- disposition='/properties/privacyStatementUri'
- ),
- release_note_uri=dict(
- type='str',
- disposition='/properties/releaseNoteUri'
- ),
- os_type=dict(
- type='str',
- disposition='/properties/osType',
- choices=['windows',
- 'linux']
- ),
- os_state=dict(
- type='str',
- disposition='/properties/osState',
- choices=['generalized',
- 'specialized']
- ),
- end_of_life_date=dict(
- type='str',
- disposition='/properties/endOfLifeDate'
- ),
- identifier=dict(
- type='dict',
- disposition='/properties/*',
- options=dict(
- publisher=dict(
- type='str',
- required=True,
- updatable=False
- ),
- offer=dict(
- type='str',
- required=True
- ),
- sku=dict(
- type='str',
- required=True
- )
- )
- ),
- recommended=dict(
- type='dict',
- disposition='/properties/*',
- options=dict(
- v_cpus=dict(
- type='dict',
- disposition='vCPUs',
- options=dict(
- min=dict(
- type='int'
- ),
- max=dict(
- type='int'
- )
- )
- ),
- memory=dict(
- type='dict',
- options=dict(
- min=dict(
- type='int'
- ),
- max=dict(
- type='int'
- )
- )
- )
- )
- ),
- disallowed=dict(
- type='dict',
- disposition='/properties/*',
- options=dict(
- disk_types=dict(
- type='list',
- disposition='diskTypes'
- )
- )
- ),
- purchase_plan=dict(
- type='dict',
- disposition='/properties/purchasePlan',
- options=dict(
- name=dict(
- type='str'
- ),
- publisher=dict(
- type='str'
- ),
- product=dict(
- type='str'
- )
- )
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.gallery_name = None
- self.name = None
- self.gallery_image = None
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.url = None
- self.status_code = [200, 201, 202]
- self.to_do = Actions.NoAction
-
- self.body = {}
- self.query_parameters = {}
- self.query_parameters['api-version'] = '2019-07-01'
- self.header_parameters = {}
- self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
-
- super(AzureRMGalleryImages, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- self.body[key] = kwargs[key]
-
- self.inflate_parameters(self.module_arg_spec, self.body, 0)
-
- old_response = None
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- resource_group = self.get_resource_group(self.resource_group)
-
- if 'location' not in self.body:
- self.body['location'] = resource_group.location
-
- self.url = ('/subscriptions' +
- '/{{ subscription_id }}' +
- '/resourceGroups' +
- '/{{ resource_group }}' +
- '/providers' +
- '/Microsoft.Compute' +
- '/galleries' +
- '/{{ gallery_name }}' +
- '/images' +
- '/{{ image_name }}')
- self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
- self.url = self.url.replace('{{ resource_group }}', self.resource_group)
- self.url = self.url.replace('{{ gallery_name }}', self.gallery_name)
- self.url = self.url.replace('{{ image_name }}', self.name)
-
- old_response = self.get_resource()
-
- if not old_response:
- self.log("GalleryImage instance doesn't exist")
-
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log('GalleryImage instance already exists')
-
- if self.state == 'absent':
- self.to_do = Actions.Delete
- else:
- modifiers = {}
- self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
- self.results['modifiers'] = modifiers
- self.results['compare'] = []
- if not self.default_compare(modifiers, self.body, old_response, '', self.results):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log('Need to Create / Update the GalleryImage instance')
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_resource()
-
- # if not old_response:
- self.results['changed'] = True
- # else:
- # self.results['changed'] = old_response.__ne__(response)
- self.log('Creation / Update done')
- elif self.to_do == Actions.Delete:
- self.log('GalleryImage instance deleted')
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_resource()
-
-                # make sure the instance is actually deleted; for some Azure resources the instance
-                # hangs around for some time after deletion -- this should really be fixed in Azure
- while self.get_resource():
- time.sleep(20)
- else:
- self.log('GalleryImage instance unchanged')
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
-
- return self.results
-
- def create_update_resource(self):
- # self.log('Creating / Updating the GalleryImage instance {0}'.format(self.))
-
- try:
- response = self.mgmt_client.query(self.url,
- 'PUT',
- self.query_parameters,
- self.header_parameters,
- self.body,
- self.status_code,
- 600,
- 30)
- except CloudError as exc:
- self.log('Error attempting to create the GalleryImage instance.')
- self.fail('Error creating the GalleryImage instance: {0}'.format(str(exc)))
-
- try:
- response = json.loads(response.text)
- except Exception:
- response = {'text': response.text}
-
- return response
-
- def delete_resource(self):
- # self.log('Deleting the GalleryImage instance {0}'.format(self.))
- try:
- response = self.mgmt_client.query(self.url,
- 'DELETE',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- except CloudError as e:
- self.log('Error attempting to delete the GalleryImage instance.')
- self.fail('Error deleting the GalleryImage instance: {0}'.format(str(e)))
-
- return True
-
- def get_resource(self):
- # self.log('Checking if the GalleryImage instance {0} is present'.format(self.))
- found = False
- try:
- response = self.mgmt_client.query(self.url,
- 'GET',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- response = json.loads(response.text)
- found = True
- self.log("Response : {0}".format(response))
- # self.log("AzureFirewall instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the AzureFirewall instance.')
- if found is True:
- return response
-
- return False
-
-
-def main():
- AzureRMGalleryImages()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_galleryimage_info.py b/lib/ansible/modules/cloud/azure/azure_rm_galleryimage_info.py
deleted file mode 100644
index e1f23c38f9..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_galleryimage_info.py
+++ /dev/null
@@ -1,274 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Liu Qingyi, (@smile37773)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_galleryimage_info
-version_added: '2.9'
-short_description: Get Azure SIG Image info
-description:
- - Get info of Azure SIG Image.
-options:
- resource_group:
- description:
- - The name of the resource group.
- type: str
- required: true
- gallery_name:
- description:
- - The name of the shared image gallery from which the image definitions are to be retrieved.
- type: str
- required: true
- name:
- description:
- - Resource name.
- type: str
-extends_documentation_fragment:
- - azure
-author:
- - Liu Qingyi (@smile37773)
-
-'''
-
-EXAMPLES = '''
-- name: List gallery images in a gallery.
- azure_rm_galleryimage_info:
- resource_group: myResourceGroup
- gallery_name: myGallery
-- name: Get a gallery image.
- azure_rm_galleryimage_info:
- resource_group: myResourceGroup
- gallery_name: myGallery
- name: myImage
-
-'''
-
-RETURN = '''
-images:
- description:
- - A list of dict results where the key is the name of the image and the values are the info for that image.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup
- /providers/Microsoft.Compute/galleries/myGallery/images/myImage"
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: myImage
- location:
- description:
- - Resource location.
- returned: always
- type: str
- sample: "eastus"
- tags:
- description:
- - Resource tags.
- returned: always
- type: dict
- sample: { "tag": "value" }
- os_state:
- description:
-                - The allowed value for OS State is C(generalized).
-            type: str
- sample: "Generalized"
- os_type:
- description:
- - This property allows you to specify the type of the OS that is included in the disk when creating a VM from a managed image.
-            type: str
- sample: "linux/windows"
- identifier:
- description:
- - This is the gallery image definition identifier.
- type: dict
- contains:
- offer:
- description:
- - The name of the gallery image definition offer.
- type: str
- sample: "myOfferName"
- publisher:
- description:
- - The name of the gallery image definition publisher.
- type: str
- sample: "myPublisherName"
- sku:
- description:
- - The name of the gallery image definition sku.
- type: str
- sample: "mySkuName"
-
-'''
-
-import time
-import json
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.azure_rm_common_rest import GenericRestClient
-from copy import deepcopy
-try:
- from msrestazure.azure_exceptions import CloudError
-except Exception:
- # handled in azure_rm_common
- pass
-
-
-class AzureRMGalleryImagesInfo(AzureRMModuleBase):
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- gallery_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
-
- self.resource_group = None
- self.gallery_name = None
- self.name = None
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.url = None
- self.status_code = [200]
-
- self.query_parameters = {}
- self.query_parameters['api-version'] = '2019-03-01'
- self.header_parameters = {}
- self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
-
- self.mgmt_client = None
- super(AzureRMGalleryImagesInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if (self.resource_group is not None and
- self.gallery_name is not None and
- self.name is not None):
- # self.results['gallery_images'] = self.format_item(self.get())
- self.results['images'] = self.get()
- elif (self.resource_group is not None and
- self.gallery_name is not None):
- # self.results['gallery_images'] = self.format_item(self.listbygallery())
- self.results['images'] = self.listbygallery()
- return self.results
-
- def get(self):
- response = None
- results = {}
- # prepare url
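-        # e.g. /subscriptions/<subscription_id>/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGallery/images/myImage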
- self.url = ('/subscriptions' +
- '/{{ subscription_id }}' +
- '/resourceGroups' +
- '/{{ resource_group }}' +
- '/providers' +
- '/Microsoft.Compute' +
- '/galleries' +
- '/{{ gallery_name }}' +
- '/images' +
- '/{{ image_name }}')
- self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
- self.url = self.url.replace('{{ resource_group }}', self.resource_group)
- self.url = self.url.replace('{{ gallery_name }}', self.gallery_name)
- self.url = self.url.replace('{{ image_name }}', self.name)
-
- try:
- response = self.mgmt_client.query(self.url,
- 'GET',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- results = json.loads(response.text)
- # self.log('Response : {0}'.format(response))
- except CloudError as e:
-            self.log('Could not get info for the GalleryImage instance.')
-
- return self.format_item(results)
-
- def listbygallery(self):
- response = None
- results = {}
- # prepare url
- self.url = ('/subscriptions' +
- '/{{ subscription_id }}' +
- '/resourceGroups' +
- '/{{ resource_group }}' +
- '/providers' +
- '/Microsoft.Compute' +
- '/galleries' +
- '/{{ gallery_name }}' +
- '/images')
- self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
- self.url = self.url.replace('{{ resource_group }}', self.resource_group)
- self.url = self.url.replace('{{ gallery_name }}', self.gallery_name)
-
- try:
- response = self.mgmt_client.query(self.url,
- 'GET',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- results = json.loads(response.text)
- # self.log('Response : {0}'.format(response))
- except CloudError as e:
-            self.log('Could not get info for the GalleryImage instance.')
-
-        return [self.format_item(x) for x in results['value']] if results.get('value') else []
-
- def format_item(self, item):
- d = {
- 'id': item['id'],
- 'name': item['name'],
- 'location': item['location'],
- 'tags': item.get('tags'),
- 'os_state': item['properties']['osState'],
- 'os_type': item['properties']['osType'],
- 'identifier': item['properties']['identifier']
- }
- return d
-
-
-def main():
- AzureRMGalleryImagesInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion.py b/lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion.py
deleted file mode 100644
index 65ef8ffb51..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion.py
+++ /dev/null
@@ -1,629 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_galleryimageversion
-version_added: '2.9'
-short_description: Manage Azure SIG Image Version instance
-description:
- - Create, update and delete instance of Azure SIG Image Version.
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: true
- type: str
- gallery_name:
- description:
- - The name of the Shared Image Gallery in which the Image Definition resides.
- required: true
- type: str
- gallery_image_name:
- description:
- - The name of the gallery Image Definition in which the Image Version is to be created.
- required: true
- type: str
- name:
- description:
- - The name of the gallery Image Version to be created.
-            - Needs to follow the semantic version name pattern. The allowed characters are digits and periods.
- - Digits must be within the range of a 32-bit integer. For example <MajorVersion>.<MinorVersion>.<Patch>.
- required: true
- type: str
- location:
- description:
- - Resource location.
- type: str
- storage_profile:
- description:
- - Storage profile
- required: true
- version_added: "2.10"
- type: dict
- suboptions:
- source_image:
- description:
-                    - Reference to a managed image or gallery image version.
-                    - Could be the resource ID of a managed image, or a dictionary containing I(resource_group) and I(name).
-                    - Could be the resource ID of an image version, or a dictionary containing I(resource_group), I(gallery_name), I(gallery_image_name) and I(version).
-                    - Mutually exclusive with I(os_disk) and I(data_disks).
- type: raw
- os_disk:
- description:
-                    - OS disk snapshot.
-                    - Mutually exclusive with I(source_image).
- type: raw
- suboptions:
- source:
- description:
-                            - Reference to an OS disk snapshot. Could be a resource ID or a dictionary containing I(resource_group) and I(name).
- type: str
- host_caching:
- description:
- - host disk caching
- type: str
- default: None
- choices:
- - None
- - ReadOnly
- - ReadWrite
- data_disks:
- description:
-                    - List of data disk snapshots.
-                    - Mutually exclusive with I(source_image).
- type: list
- suboptions:
- source:
- description:
- - Reference to data disk snapshot. Could be resource ID or dictionary containing I(resource_group) and I(name)
- type: str
- lun:
- description:
- - lun of the data disk
- type: int
- host_caching:
- description:
- - host disk caching
- type: str
- default: None
- choices:
- - None
- - ReadOnly
- - ReadWrite
- publishing_profile:
- description:
- - Publishing profile.
- required: true
- type: dict
- suboptions:
- target_regions:
- description:
- - The target regions where the Image Version is going to be replicated to.
- - This property is updatable.
- type: list
- suboptions:
- name:
- description:
- - Region name.
- type: str
- regional_replica_count:
- description:
-                            - The number of replicas of the Image Version to be created in this region.
-                            - This property is updatable.
-                        type: int
- storage_account_type:
- description:
- - Storage account type.
- type: str
-            managed_image:
-                description:
-                    - Managed image reference, could be a resource ID, or a dictionary containing I(resource_group) and I(name).
-                    - Obsolete since 2.10, use I(storage_profile) instead.
-                type: raw
-            snapshot:
-                description:
-                    - Source snapshot to be used.
-                    - Obsolete since 2.10, use I(storage_profile) instead.
-                type: raw
- replica_count:
- description:
- - The number of replicas of the Image Version to be created per region.
- - This property would take effect for a region when regionalReplicaCount is not specified.
- - This property is updatable.
- type: int
- exclude_from_latest:
- description:
-                    - If I(exclude_from_latest=true), Virtual Machines deployed from the latest version of the Image Definition won't use this Image Version.
- type: bool
- end_of_life_date:
- description:
- - The end of life date of the gallery Image Version.
- - This property can be used for decommissioning purposes.
- - This property is updatable. Format should be according to ISO-8601, for instance "2019-06-26".
- type: str
- storage_account_type:
- description:
- - Specifies the storage account type to be used to store the image.
- - This property is not updatable.
- type: str
- state:
- description:
- - Assert the state of the GalleryImageVersion.
-            - Use C(present) to create or update a GalleryImageVersion and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
- type: str
-extends_documentation_fragment:
- - azure
- - azure_tags
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
-- name: Create a gallery image version from a managed image
- azure_rm_galleryimageversion:
- resource_group: myResourceGroup
- gallery_name: myGallery
- gallery_image_name: myGalleryImage
- name: 1.1.0
- location: East US
- publishing_profile:
- end_of_life_date: "2020-10-01t00:00:00+00:00"
- exclude_from_latest: yes
- replica_count: 4
- storage_account_type: Standard_LRS
- target_regions:
- - name: West US
- regional_replica_count: 1
- - name: East US
- regional_replica_count: 3
- storage_account_type: Standard_LRS
- storage_profile:
- source_image: /subscriptions/sub123/resourceGroups/group123/providers/Microsoft.Compute/images/myOsImage
-
-- name: Create a gallery image version from another gallery image version
- azure_rm_galleryimageversion:
- resource_group: myResourceGroup
- gallery_name: myGallery
- gallery_image_name: myGalleryImage
- name: 1.2.0
- location: East US
- publishing_profile:
- end_of_life_date: "2020-10-01t00:00:00+00:00"
- exclude_from_latest: yes
- replica_count: 4
- storage_account_type: Standard_LRS
- target_regions:
- - name: West US
- regional_replica_count: 1
- - name: East US
- regional_replica_count: 3
- storage_account_type: Standard_LRS
- storage_profile:
- source_image:
- version: 1.1.0
- gallery_name: myGallery2
- gallery_image_name: myGalleryImage2
-
-- name: Create a gallery image version from one OS disk snapshot and zero or more data disk snapshots
-  azure_rm_galleryimageversion:
-    resource_group: myResourceGroup
- gallery_name: myGallery
- gallery_image_name: myGalleryImage
- name: 3.4.0
- location: East US
- publishing_profile:
- end_of_life_date: "2020-10-01t00:00:00+00:00"
- exclude_from_latest: yes
- replica_count: 1
- storage_account_type: Standard_LRS
- target_regions:
- - name: East US
- regional_replica_count: 1
- storage_account_type: Standard_LRS
- storage_profile:
- os_disk:
- source: "/subscriptions/mySub/resourceGroups/myGroup/providers/Microsoft.Compute/snapshots/os_snapshot_vma"
- data_disks:
- - lun: 0
- source:
- name: data_snapshot_vma
- - lun: 1
- source: "/subscriptions/mySub/resourceGroups/myGroup/providers/Microsoft.Compute/snapshots/data_snapshot_vmb"
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGalle
- ry1283/images/myImage/versions/10.1.3"
-'''
-
-import time
-import json
-from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
-from ansible.module_utils.azure_rm_common_rest import GenericRestClient
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMGalleryImageVersions(AzureRMModuleBaseExt):
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- updatable=False,
- disposition='resourceGroupName',
- required=True
- ),
- gallery_name=dict(
- type='str',
- updatable=False,
- disposition='galleryName',
- required=True
- ),
- gallery_image_name=dict(
- type='str',
- updatable=False,
- disposition='galleryImageName',
- required=True
- ),
- name=dict(
- type='str',
- updatable=False,
- disposition='galleryImageVersionName',
- required=True
- ),
- location=dict(
- type='str',
- updatable=False,
- disposition='/',
- comparison='location'
- ),
- storage_profile=dict(
- type='dict',
- updatable=False,
- disposition='/properties/storageProfile',
- comparison='ignore',
- options=dict(
- source_image=dict(
- type='raw',
- disposition='source/id',
- purgeIfNone=True,
- pattern=[('/subscriptions/{subscription_id}/resourceGroups'
- '/{resource_group}/providers/Microsoft.Compute'
- '/images/{name}'),
- ('/subscriptions/{subscription_id}/resourceGroups'
- '/{resource_group}/providers/Microsoft.Compute'
- '/galleries/{gallery_name}/images/{gallery_image_name}'
- '/versions/{version}')]
- ),
- os_disk=dict(
- type='dict',
- disposition='osDiskImage',
- purgeIfNone=True,
- comparison='ignore',
- options=dict(
- source=dict(
- type='raw',
- disposition='source/id',
- pattern=('/subscriptions/{subscription_id}/resourceGroups'
- '/{resource_group}/providers/Microsoft.Compute'
- '/snapshots/{name}')
- ),
- host_caching=dict(
- type='str',
- disposition='hostCaching',
- default="None",
- choices=["ReadOnly", "ReadWrite", "None"]
- )
- )
- ),
- data_disks=dict(
- type='list',
- disposition='dataDiskImages',
- purgeIfNone=True,
- options=dict(
- lun=dict(
- type='int'
- ),
- source=dict(
- type='raw',
- disposition="source/id",
- pattern=('/subscriptions/{subscription_id}/resourceGroups'
- '/{resource_group}/providers/Microsoft.Compute'
- '/snapshots/{name}')
- ),
- host_caching=dict(
- type='str',
- disposition='hostCaching',
- default="None",
- choices=["ReadOnly", "ReadWrite", "None"]
- )
- )
- )
- )
- ),
- publishing_profile=dict(
- type='dict',
- disposition='/properties/publishingProfile',
- options=dict(
- target_regions=dict(
- type='list',
- disposition='targetRegions',
- options=dict(
- name=dict(
- type='str',
- required=True,
- comparison='location'
- ),
- regional_replica_count=dict(
- type='int',
- disposition='regionalReplicaCount'
- ),
- storage_account_type=dict(
- type='str',
- disposition='storageAccountType'
- )
- )
- ),
- managed_image=dict(
- type='raw',
- pattern=('/subscriptions/{subscription_id}/resourceGroups'
- '/{resource_group}/providers/Microsoft.Compute'
- '/images/{name}'),
- comparison='ignore'
- ),
- snapshot=dict(
- type='raw',
- pattern=('/subscriptions/{subscription_id}/resourceGroups'
- '/{resource_group}/providers/Microsoft.Compute'
- '/snapshots/{name}'),
- comparison='ignore'
- ),
- replica_count=dict(
- type='int',
- disposition='replicaCount'
- ),
- exclude_from_latest=dict(
- type='bool',
- disposition='excludeFromLatest'
- ),
- end_of_life_date=dict(
- type='str',
- disposition='endOfLifeDate'
- ),
- storage_account_type=dict(
- type='str',
- disposition='storageAccountType',
- choices=['Standard_LRS',
- 'Standard_ZRS']
- )
- )
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.gallery_name = None
- self.gallery_image_name = None
- self.name = None
- self.gallery_image_version = None
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.url = None
- self.status_code = [200, 201, 202]
- self.to_do = Actions.NoAction
-
- self.body = {}
- self.query_parameters = {}
- self.query_parameters['api-version'] = '2019-07-01'
- self.header_parameters = {}
- self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
-
- super(AzureRMGalleryImageVersions, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- self.body[key] = kwargs[key]
-
- self.inflate_parameters(self.module_arg_spec, self.body, 0)
-
- # keep backward compatibility
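-        # The deprecated flat options under publishing_profile are relocated into the
-        # nested storageProfile section of the request body:
-        #   snapshot      -> properties.storageProfile.osDiskImage.source.id
-        #   managed_image -> properties.storageProfile.source.id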
- snapshot = self.body.get('properties', {}).get('publishingProfile', {}).pop('snapshot', None)
- if snapshot is not None:
- self.body['properties'].setdefault('storageProfile', {}).setdefault('osDiskImage', {}).setdefault('source', {})['id'] = snapshot
- managed_image = self.body.get('properties', {}).get('publishingProfile', {}).pop('managed_image', None)
- if managed_image:
- self.body['properties'].setdefault('storageProfile', {}).setdefault('source', {})['id'] = managed_image
-
- old_response = None
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- resource_group = self.get_resource_group(self.resource_group)
-
- if 'location' not in self.body:
- self.body['location'] = resource_group.location
-
- self.url = ('/subscriptions' +
- '/{{ subscription_id }}' +
- '/resourceGroups' +
- '/{{ resource_group }}' +
- '/providers' +
- '/Microsoft.Compute' +
- '/galleries' +
- '/{{ gallery_name }}' +
- '/images' +
- '/{{ image_name }}' +
- '/versions' +
- '/{{ version_name }}')
- self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
- self.url = self.url.replace('{{ resource_group }}', self.resource_group)
- self.url = self.url.replace('{{ gallery_name }}', self.gallery_name)
- self.url = self.url.replace('{{ image_name }}', self.gallery_image_name)
- self.url = self.url.replace('{{ version_name }}', self.name)
-
- old_response = self.get_resource()
-
- if not old_response:
- self.log("GalleryImageVersion instance doesn't exist")
-
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log('GalleryImageVersion instance already exists')
-
- if self.state == 'absent':
- self.to_do = Actions.Delete
- else:
- modifiers = {}
- self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
- self.results['modifiers'] = modifiers
- self.results['compare'] = []
- if not self.default_compare(modifiers, self.body, old_response, '', self.results):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log('Need to Create / Update the GalleryImageVersion instance')
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_resource()
-
- self.results['changed'] = True
- self.log('Creation / Update done')
- elif self.to_do == Actions.Delete:
- self.log('GalleryImageVersion instance deleted')
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_resource()
- else:
- self.log('GalleryImageVersion instance unchanged')
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
-
- return self.results
-
- def create_update_resource(self):
- # self.log('Creating / Updating the GalleryImageVersion instance {0}'.format(self.))
-
- try:
- response = self.mgmt_client.query(self.url,
- 'PUT',
- self.query_parameters,
- self.header_parameters,
- self.body,
- self.status_code,
- 600,
- 30)
- except CloudError as exc:
- self.log('Error attempting to create the GalleryImageVersion instance.')
- self.fail('Error creating the GalleryImageVersion instance: {0}'.format(str(exc)))
-
- try:
- response = json.loads(response.text)
- except Exception:
- response = {'text': response.text}
-
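-        # Creating an image version also replicates it to the target regions, which
-        # can take a long time, so poll until the provisioning state leaves 'Creating'.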
- while response['properties']['provisioningState'] == 'Creating':
- time.sleep(60)
- response = self.get_resource()
-
- return response
-
- def delete_resource(self):
- # self.log('Deleting the GalleryImageVersion instance {0}'.format(self.))
- try:
- response = self.mgmt_client.query(self.url,
- 'DELETE',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- except CloudError as e:
- self.log('Error attempting to delete the GalleryImageVersion instance.')
- self.fail('Error deleting the GalleryImageVersion instance: {0}'.format(str(e)))
- return True
-
- def get_resource(self):
- # self.log('Checking if the GalleryImageVersion instance {0} is present'.format(self.))
- found = False
- try:
- response = self.mgmt_client.query(self.url,
- 'GET',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- response = json.loads(response.text)
- found = True
- self.log("Response : {0}".format(response))
- # self.log("AzureFirewall instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the AzureFirewall instance.')
- if found is True:
- return response
-
- return False
-
-
-def main():
- AzureRMGalleryImageVersions()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion_info.py b/lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion_info.py
deleted file mode 100644
index a5d5339f2f..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion_info.py
+++ /dev/null
@@ -1,270 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Liu Qingyi, (@smile37773)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_galleryimageversion_info
-version_added: '2.9'
-short_description: Get Azure SIG Image Version info
-description:
- - Get info of Azure SIG Image Version.
-options:
- resource_group:
- description:
- - The name of the resource group.
- type: str
- required: true
- gallery_name:
- description:
- - The name of the Shared Image Gallery in which the Image Definition resides.
- type: str
- required: true
- gallery_image_name:
- description:
- - The name of the gallery Image Definition in which the Image Version resides.
- type: str
- required: true
- name:
- description:
- - Resource name.
- type: str
-extends_documentation_fragment:
- - azure
-author:
- - Liu Qingyi (@smile37773)
-
-'''
-
-EXAMPLES = '''
-- name: List gallery image versions in a gallery image definition.
- azure_rm_galleryimageversion_info:
- resource_group: myResourceGroup
- gallery_name: myGallery
- gallery_image_name: myImage
-- name: Get a gallery image version.
- azure_rm_galleryimageversion_info:
- resource_group: myResourceGroup
- gallery_name: myGallery
- gallery_image_name: myImage
- name: myVersion
-
-'''
-
-RETURN = '''
-versions:
- description:
-    - A list of dict results where the key is the name of the version and the values are the info for that version.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups
- /myResourceGroup/providers/Microsoft.Compute/galleries/myGallery/images/myImage/versions/myVersion"
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: "myVersion"
- location:
- description:
- - Resource location.
- returned: always
- type: str
- sample: "eastus"
- tags:
- description:
- - Resource tags.
- returned: always
- type: dict
- sample: { "tag": "value" }
- publishing_profile:
- description:
- - The publishing profile of a gallery image version.
- type: dict
- provisioning_state:
- description:
- - The current state of the gallery.
- type: str
- sample: "Succeeded"
-
-'''
-
-import time
-import json
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.azure_rm_common_rest import GenericRestClient
-from copy import deepcopy
-try:
- from msrestazure.azure_exceptions import CloudError
-except Exception:
- # handled in azure_rm_common
- pass
-
-
-class AzureRMGalleryImageVersionsInfo(AzureRMModuleBase):
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- gallery_name=dict(
- type='str',
- required=True
- ),
- gallery_image_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
-
- self.resource_group = None
- self.gallery_name = None
- self.gallery_image_name = None
- self.name = None
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.url = None
- self.status_code = [200]
-
- self.query_parameters = {}
- self.query_parameters['api-version'] = '2019-03-01'
- self.header_parameters = {}
- self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
-
- self.mgmt_client = None
- super(AzureRMGalleryImageVersionsInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if (self.resource_group is not None and
- self.gallery_name is not None and
- self.gallery_image_name is not None and
- self.name is not None):
- self.results['versions'] = self.get()
- elif (self.resource_group is not None and
- self.gallery_name is not None and
- self.gallery_image_name is not None):
- self.results['versions'] = self.listbygalleryimage()
- return self.results
-
- def get(self):
- response = None
- results = {}
- # prepare url
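-        # e.g. /subscriptions/<subscription_id>/resourceGroups/myResourceGroup/providers/Microsoft.Compute/galleries/myGallery/images/myImage/versions/myVersion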
- self.url = ('/subscriptions' +
- '/{{ subscription_id }}' +
- '/resourceGroups' +
- '/{{ resource_group }}' +
- '/providers' +
- '/Microsoft.Compute' +
- '/galleries' +
- '/{{ gallery_name }}' +
- '/images' +
- '/{{ image_name }}' +
- '/versions' +
- '/{{ version_name }}')
- self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
- self.url = self.url.replace('{{ resource_group }}', self.resource_group)
- self.url = self.url.replace('{{ gallery_name }}', self.gallery_name)
- self.url = self.url.replace('{{ image_name }}', self.gallery_image_name)
- self.url = self.url.replace('{{ version_name }}', self.name)
-
- try:
- response = self.mgmt_client.query(self.url,
- 'GET',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- results = json.loads(response.text)
- # self.log('Response : {0}'.format(response))
- except CloudError as e:
-            self.log('Could not get info for the GalleryImageVersion instance.')
-
- return self.format_item(results)
-
- def listbygalleryimage(self):
- response = None
- results = {}
- # prepare url
- self.url = ('/subscriptions' +
- '/{{ subscription_id }}' +
- '/resourceGroups' +
- '/{{ resource_group }}' +
- '/providers' +
- '/Microsoft.Compute' +
- '/galleries' +
- '/{{ gallery_name }}' +
- '/images' +
- '/{{ image_name }}' +
- '/versions')
- self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
- self.url = self.url.replace('{{ resource_group }}', self.resource_group)
- self.url = self.url.replace('{{ gallery_name }}', self.gallery_name)
- self.url = self.url.replace('{{ image_name }}', self.gallery_image_name)
-
- try:
- response = self.mgmt_client.query(self.url,
- 'GET',
- self.query_parameters,
- self.header_parameters,
- None,
- self.status_code,
- 600,
- 30)
- results = json.loads(response.text)
- # self.log('Response : {0}'.format(response))
- except CloudError as e:
-            self.log('Could not get info for the GalleryImageVersion instance.')
-
-        return [self.format_item(x) for x in results['value']] if results.get('value') else []
-
- def format_item(self, item):
- d = {
- 'id': item['id'],
- 'name': item['name'],
- 'location': item['location'],
- 'tags': item.get('tags'),
- 'publishing_profile': item['properties']['publishingProfile'],
- 'provisioning_state': item['properties']['provisioningState']
- }
- return d
-
-
-def main():
- AzureRMGalleryImageVersionsInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_hdinsightcluster.py b/lib/ansible/modules/cloud/azure/azure_rm_hdinsightcluster.py
deleted file mode 100644
index 831b7860b4..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_hdinsightcluster.py
+++ /dev/null
@@ -1,555 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_hdinsightcluster
-version_added: "2.8"
-short_description: Manage Azure HDInsight Cluster instance
-description:
- - Create, update and delete instance of Azure HDInsight Cluster.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- name:
- description:
- - The name of the cluster.
- required: True
- location:
- description:
- - Resource location. If not set, location from the resource group will be used as default.
- cluster_version:
- description:
- - The version of the cluster. For example C(3.6).
- os_type:
- description:
- - The type of operating system.
- choices:
- - 'linux'
- tier:
- description:
- - The cluster tier.
- choices:
- - 'standard'
- - 'premium'
- cluster_definition:
- description:
- - The cluster definition.
- suboptions:
- kind:
- description:
- - The type of cluster.
- choices:
- - hadoop
- - spark
- - hbase
- - storm
- gateway_rest_username:
- description:
- - Gateway REST user name.
- gateway_rest_password:
- description:
- - Gateway REST password.
- compute_profile_roles:
- description:
- - The list of roles in the cluster.
- type: list
- suboptions:
- name:
- description:
- - The name of the role.
- choices:
- - 'headnode'
- - 'workernode'
- - 'zookepernode'
- min_instance_count:
- description:
- - The minimum instance count of the cluster.
- target_instance_count:
- description:
- - The instance count of the cluster.
- vm_size:
- description:
- - The size of the VM.
- linux_profile:
- description:
- - The Linux OS profile.
- suboptions:
- username:
- description:
- - SSH user name.
- password:
- description:
- - SSH password.
- storage_accounts:
- description:
- - The list of storage accounts in the cluster.
- type: list
- suboptions:
- name:
- description:
- - Blob storage endpoint. For example storage_account_name.blob.core.windows.net.
- is_default:
- description:
- - Whether or not the storage account is the default storage account.
- container:
- description:
- - The container in the storage account.
- key:
- description:
- - The storage account access key.
- state:
- description:
- - Assert the state of the cluster.
- - Use C(present) to create or update a cluster and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
-- name: Create instance of HDInsight Cluster
- azure_rm_hdinsightcluster:
- resource_group: myResourceGroup
- name: myCluster
- location: eastus2
- cluster_version: 3.6
- os_type: linux
- tier: standard
- cluster_definition:
- kind: spark
- gateway_rest_username: http-user
- gateway_rest_password: MuABCPassword!!@123
- storage_accounts:
- - name: myStorageAccount.blob.core.windows.net
- is_default: yes
- container: myContainer
- key: GExmaxH4lDNdHA9nwAsCt8t4AOQas2y9vXQP1kKALTram7Q3/5xLVIab3+nYG1x63Xyak9/VXxQyNBHA9pDWw==
- compute_profile_roles:
- - name: headnode
- target_instance_count: 2
- hardware_profile:
- vm_size: Standard_D3
- linux_profile:
- username: sshuser
- password: MuABCPassword!!@123
- - name: workernode
- target_instance_count: 2
- vm_size: Standard_D3
- linux_profile:
- username: sshuser
- password: MuABCPassword!!@123
-'''
-
-RETURN = '''
-id:
- description:
- - Fully qualified resource id of the cluster.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.HDInsight/clusters/myCluster
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrestazure.azure_operation import AzureOperationPoller
- from azure.mgmt.hdinsight import HDInsightManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMClusters(AzureRMModuleBase):
- """Configuration class for an Azure RM Cluster resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- cluster_version=dict(
- type='str'
- ),
- os_type=dict(
- type='str',
- choices=['linux']
- ),
- tier=dict(
- type='str',
- choices=['standard',
- 'premium']
- ),
- cluster_definition=dict(
- type='dict'
- ),
- compute_profile_roles=dict(
- type='list'
- ),
- storage_accounts=dict(
- type='list'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.name = None
- self.parameters = dict()
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.to_do = Actions.NoAction
- self.tags_changed = False
- self.new_instance_count = None
-
- super(AzureRMClusters, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- self.parameters[key] = kwargs[key]
-
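-        # Reshape the flat module options into the nested payload expected by the
-        # HDInsight SDK: top-level settings move under 'properties', the gateway
-        # credentials become cluster_definition.configurations.gateway entries, and
-        # the role and storage options are grouped into compute_profile and
-        # storage_profile respectively.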
- dict_expand(self.parameters, ['cluster_version'], 'properties')
- dict_camelize(self.parameters, ['os_type'], True)
- dict_expand(self.parameters, ['os_type'], 'properties')
- dict_camelize(self.parameters, ['tier'], True)
- dict_expand(self.parameters, ['tier'], 'properties')
-
- dict_rename(self.parameters, ['cluster_definition', 'gateway_rest_username'], 'restAuthCredential.username')
- dict_rename(self.parameters, ['cluster_definition', 'gateway_rest_password'], 'restAuthCredential.password')
- dict_expand(self.parameters, ['cluster_definition', 'restAuthCredential.username'], 'gateway')
- dict_expand(self.parameters, ['cluster_definition', 'restAuthCredential.password'], 'gateway')
- dict_expand(self.parameters, ['cluster_definition', 'gateway'], 'configurations')
-
- dict_expand(self.parameters, ['cluster_definition'], 'properties')
- dict_expand(self.parameters, ['compute_profile_roles', 'vm_size'], 'hardware_profile')
- dict_rename(self.parameters, ['compute_profile_roles', 'linux_profile'], 'linux_operating_system_profile')
- dict_expand(self.parameters, ['compute_profile_roles', 'linux_operating_system_profile'], 'os_profile')
- dict_rename(self.parameters, ['compute_profile_roles'], 'roles')
- dict_expand(self.parameters, ['roles'], 'compute_profile')
- dict_expand(self.parameters, ['compute_profile'], 'properties')
- dict_rename(self.parameters, ['storage_accounts'], 'storageaccounts')
- dict_expand(self.parameters, ['storageaccounts'], 'storage_profile')
- dict_expand(self.parameters, ['storage_profile'], 'properties')
-
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(HDInsightManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- resource_group = self.get_resource_group(self.resource_group)
-
- if "location" not in self.parameters:
- self.parameters["location"] = resource_group.location
-
- old_response = self.get_cluster()
-
- if not old_response:
- self.log("Cluster instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("Cluster instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- compare_result = {}
- if (not default_compare(self.parameters, old_response, '', compare_result)):
- if compare_result.pop('/properties/compute_profile/roles/*/target_instance_count', False):
- # check if it's workernode
- new_count = 0
- old_count = 0
- for role in self.parameters['properties']['compute_profile']['roles']:
- if role['name'] == 'workernode':
- new_count = role['target_instance_count']
- for role in old_response['properties']['compute_profile']['roles']:
- if role['name'] == 'workernode':
- old_count = role['target_instance_count']
- if old_count != new_count:
- self.new_instance_count = new_count
- self.to_do = Actions.Update
- if compare_result.pop('/tags', False):
- self.to_do = Actions.Update
- self.tags_changed = True
- if compare_result:
- for k in compare_result.keys():
- self.module.warn("property '" + k + "' cannot be updated (" + compare_result[k] + ")")
- self.module.warn("only tags and target_instance_count can be updated")
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the Cluster instance")
- self.results['changed'] = True
- if self.check_mode:
- return self.results
- response = self.create_update_cluster()
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("Cluster instance deleted")
- self.results['changed'] = True
- if self.check_mode:
- return self.results
- self.delete_cluster()
- else:
- self.log("Cluster instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if self.state == 'present':
- self.results.update(self.format_item(response))
- return self.results
-
- def create_update_cluster(self):
- '''
- Creates or updates Cluster with the specified configuration.
-
- :return: deserialized Cluster instance state dictionary
- '''
- self.log("Creating / Updating the Cluster instance {0}".format(self.name))
-
- try:
- if self.to_do == Actions.Create:
- response = self.mgmt_client.clusters.create(resource_group_name=self.resource_group,
- cluster_name=self.name,
- parameters=self.parameters)
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
- else:
- if self.tags_changed:
- response = self.mgmt_client.clusters.update(resource_group_name=self.resource_group,
- cluster_name=self.name,
- tags=self.parameters.get('tags'))
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
- if self.new_instance_count:
- response = self.mgmt_client.clusters.resize(resource_group_name=self.resource_group,
- cluster_name=self.name,
- target_instance_count=self.new_instance_count)
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
- except CloudError as exc:
- self.fail("Error creating or updating Cluster instance: {0}".format(str(exc)))
- return response.as_dict() if response else {}
-
- def delete_cluster(self):
- '''
- Deletes specified Cluster instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the Cluster instance {0}".format(self.name))
- try:
- response = self.mgmt_client.clusters.delete(resource_group_name=self.resource_group,
- cluster_name=self.name)
- except CloudError as e:
- self.fail("Error deleting the Cluster instance: {0}".format(str(e)))
-
- return True
-
- def get_cluster(self):
- '''
- Gets the properties of the specified Cluster.
-
- :return: deserialized Cluster instance state dictionary
- '''
- self.log("Checking if the Cluster instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mgmt_client.clusters.get(resource_group_name=self.resource_group,
- cluster_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("Cluster instance : {0} found".format(response.name))
- except Exception as e:
- self.log('Did not find the Cluster instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
- def format_item(self, d):
- d = {
- 'id': d.get('id', None)
- }
- return d
-
-
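-# Recursively compare the requested configuration ('new') with the existing resource
-# ('old'). Differences are recorded in 'result' keyed by the property path; None
-# values in 'new' are ignored, password fields are never compared, and location
-# values are compared with spaces stripped and case ignored.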
-def default_compare(new, old, path, result):
- if new is None:
- match = True
- elif isinstance(new, dict):
- match = True
- if not isinstance(old, dict):
- result[path] = 'old dict is null'
- match = False
- else:
- for k in new.keys():
- if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
- match = False
- elif isinstance(new, list):
- if not isinstance(old, list) or len(new) != len(old):
- result[path] = 'length is different or null'
- match = False
- elif len(old) == 0:
- match = True
- else:
- match = True
- if isinstance(old[0], dict):
- key = None
- if 'id' in old[0] and 'id' in new[0]:
- key = 'id'
- elif 'name' in old[0] and 'name' in new[0]:
- key = 'name'
- else:
- key = list(old[0])[0]
- new = sorted(new, key=lambda x: x.get(key, ''))
- old = sorted(old, key=lambda x: x.get(key, ''))
- else:
- new = sorted(new)
- old = sorted(old)
- for i in range(len(new)):
- if not default_compare(new[i], old[i], path + '/*', result):
- match = False
- return match
- else:
- if path.endswith('password'):
- match = True
- else:
- if path == '/location' or path.endswith('location_name'):
- new = new.replace(' ', '').lower()
-                old = old.replace(' ', '').lower()
- if new == old:
- match = True
- else:
- result[path] = str(new) + ' != ' + str(old)
- match = False
- return match
-
-
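-# Convert the snake_case value stored at 'path' to CamelCase in place, e.g.
-# dict_camelize(d, ['os_type'], True) turns d['os_type'] == 'linux' into 'Linux'.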
-def dict_camelize(d, path, camelize_first):
- if isinstance(d, list):
- for i in range(len(d)):
- dict_camelize(d[i], path, camelize_first)
- elif isinstance(d, dict):
- if len(path) == 1:
- old_value = d.get(path[0], None)
- if old_value is not None:
- d[path[0]] = _snake_to_camel(old_value, camelize_first)
- else:
- sd = d.get(path[0], None)
- if sd is not None:
- dict_camelize(sd, path[1:], camelize_first)
-
-
-def dict_upper(d, path):
- if isinstance(d, list):
- for i in range(len(d)):
- dict_upper(d[i], path)
- elif isinstance(d, dict):
- if len(path) == 1:
- old_value = d.get(path[0], None)
- if old_value is not None:
- d[path[0]] = old_value.upper()
- else:
- sd = d.get(path[0], None)
- if sd is not None:
- dict_upper(sd, path[1:])
-
-
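-# Rename the key addressed by 'path' to 'new_name', recursing into lists of dicts,
-# e.g. dict_rename(d, ['compute_profile_roles'], 'roles') moves the value of
-# d['compute_profile_roles'] to d['roles'].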
-def dict_rename(d, path, new_name):
- if isinstance(d, list):
- for i in range(len(d)):
- dict_rename(d[i], path, new_name)
- elif isinstance(d, dict):
- if len(path) == 1:
- old_value = d.pop(path[0], None)
- if old_value is not None:
- d[new_name] = old_value
- else:
- sd = d.get(path[0], None)
- if sd is not None:
- dict_rename(sd, path[1:], new_name)
-
-
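-# Nest the key addressed by 'path' under 'outer_dict_name', e.g.
-# dict_expand(d, ['tier'], 'properties') moves d['tier'] to d['properties']['tier'].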
-def dict_expand(d, path, outer_dict_name):
- if isinstance(d, list):
- for i in range(len(d)):
- dict_expand(d[i], path, outer_dict_name)
- elif isinstance(d, dict):
- if len(path) == 1:
- old_value = d.pop(path[0], None)
- if old_value is not None:
- d[outer_dict_name] = d.get(outer_dict_name, {})
- d[outer_dict_name][path[0]] = old_value
- else:
- sd = d.get(path[0], None)
- if sd is not None:
- dict_expand(sd, path[1:], outer_dict_name)
-
-
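-# e.g. _snake_to_camel('cluster_version', True) == 'ClusterVersion' and
-# _snake_to_camel('cluster_version') == 'clusterVersion'.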
-def _snake_to_camel(snake, capitalize_first=False):
- if capitalize_first:
- return ''.join(x.capitalize() or '_' for x in snake.split('_'))
- else:
- return snake.split('_')[0] + ''.join(x.capitalize() or '_' for x in snake.split('_')[1:])
-
-
-def main():
- """Main execution"""
- AzureRMClusters()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_hdinsightcluster_info.py b/lib/ansible/modules/cloud/azure/azure_rm_hdinsightcluster_info.py
deleted file mode 100644
index 296fdec637..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_hdinsightcluster_info.py
+++ /dev/null
@@ -1,321 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_hdinsightcluster_info
-version_added: "2.9"
-short_description: Get Azure HDInsight Cluster facts
-description:
- - Get facts of Azure HDInsight Cluster.
-
-options:
- resource_group:
- description:
- - Name of an Azure resource group.
- name:
- description:
- - HDInsight cluster name.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of HDInsight Cluster
- azure_rm_hdinsightcluster_info:
- resource_group: myResourceGroup
- name: myCluster
-
- - name: List instances of HDInsight Cluster
- azure_rm_hdinsightcluster_info:
- resource_group: myResourceGroup
-'''
-
-RETURN = '''
-clusters:
- description:
- - A list of dictionaries containing facts for HDInsight Cluster.
- returned: always
- type: complex
- contains:
- id:
- description:
- - The unique resource identifier of the HDInsight Cluster.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.HDInsight/clusters/myCluster"
- resource_group:
- description:
- - Name of an Azure resource group.
- returned: always
- type: str
- sample: myResourceGroup
- name:
- description:
- - The name of the HDInsight Cluster.
- returned: always
- type: str
- sample: testaccount
- location:
- description:
- - The location of the resource group to which the resource belongs.
- returned: always
- type: str
- sample: westus
- cluster_version:
- description:
- - The version of the cluster.
- returned: always
- type: str
- sample: 3.6.1000.67
- os_type:
- description:
- - The type of operating system.
- returned: always
- type: str
- sample: linux
- tier:
- description:
- - The cluster tier.
- returned: always
- type: str
- sample: standard
- cluster_definition:
- description:
- - The cluster definition.
- contains:
- kind:
- description:
- - The type of cluster.
- returned: always
- type: str
- sample: spark
- compute_profile_roles:
- description:
- - The list of roles in the cluster.
- type: list
- contains:
- name:
- description:
- - The name of the role.
- returned: always
- type: str
- sample: headnode
- target_instance_count:
- description:
- - The instance count of the cluster.
- returned: always
- type: int
- sample: 2
- vm_size:
- description:
- - The size of the VM.
- returned: always
- type: str
- sample: Standard_D3
- linux_profile:
- description:
- - The Linux OS profile.
- contains:
- username:
- description:
- - User name.
- returned: always
- type: str
- sample: myuser
- connectivity_endpoints:
- description:
- - Cluster's connectivity endpoints.
- type: list
- contains:
- location:
- description:
- - Endpoint location.
- returned: always
- type: str
- sample: myCluster-ssh.azurehdinsight.net
- name:
- description:
- - Endpoint name.
- returned: always
- type: str
- sample: SSH
- port:
- description:
- - Endpoint port.
- returned: always
- type: int
- sample: 22
- protocol:
- description:
- - Endpoint protocol.
- returned: always
- type: str
- sample: TCP
- tags:
- description:
- - The tags of the resource.
- returned: always
- type: complex
- sample: {}
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _camel_to_snake
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.hdinsight import HDInsightManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMHDInsightclusterInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str'
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.name = None
- self.tags = None
-
- super(AzureRMHDInsightclusterInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_hdinsightcluster_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_hdinsightcluster_facts' module has been renamed to 'azure_rm_hdinsightcluster_info'",
- version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(HDInsightManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if self.name is not None:
- self.results['clusters'] = self.get()
- elif self.resource_group is not None:
- self.results['clusters'] = self.list_by_resource_group()
- else:
- self.results['clusters'] = self.list_all()
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.clusters.get(resource_group_name=self.resource_group,
- cluster_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for HDInsight Cluster.')
-
- if response and self.has_tags(response.tags, self.tags):
- results.append(self.format_response(response))
-
- return results
-
- def list_by_resource_group(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.clusters.list_by_resource_group(resource_group_name=self.resource_group)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for HDInsight Cluster.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_response(item))
-
- return results
-
- def list_all(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.clusters.list()
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for HDInsight Cluster.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_response(item))
-
- return results
-
- def format_response(self, item):
- d = item.as_dict()
- d = {
- 'id': d.get('id'),
- 'resource_group': self.parse_resource_to_dict(d.get('id')).get('resource_group'),
- 'name': d.get('name', None),
- 'location': d.get('location', '').replace(' ', '').lower(),
-
- 'cluster_version': d.get('properties', {}).get('cluster_version'),
- 'os_type': d.get('properties', {}).get('os_type'),
- 'tier': d.get('properties', {}).get('tier'),
- 'cluster_definition': {
- 'kind': d.get('properties', {}).get('cluster_definition', {}).get('kind')
- },
- 'compute_profile_roles': [{
- 'name': item.get('name'),
- 'target_instance_count': item.get('target_instance_count'),
- 'vm_size': item.get('hardware_profile', {}).get('vm_size'),
- 'linux_profile': {
- 'username': item.get('os_profile', {}).get('linux_operating_system_profile', {}).get('username')
- }
-            } for item in d.get('properties', {}).get('compute_profile', {}).get('roles', [])],
- 'connectivity_endpoints': d.get('properties', {}).get('connectivity_endpoints'),
- 'tags': d.get('tags', None)
- }
-
- return d
-
-
-def main():
- AzureRMHDInsightclusterInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_image.py b/lib/ansible/modules/cloud/azure/azure_rm_image.py
deleted file mode 100644
index b2cdf5331b..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_image.py
+++ /dev/null
@@ -1,370 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_image
-version_added: "2.5"
-short_description: Manage Azure image
-description:
- - Create, delete an image from virtual machine, blob uri, managed disk or snapshot.
-options:
- resource_group:
- description:
- - Name of resource group.
- required: true
- name:
- description:
- - Name of the image.
- required: true
- source:
- description:
- - OS disk source from the same region.
- - It can be a virtual machine, OS disk blob URI, managed OS disk, or OS snapshot.
-            - Each type of source except a blob URI can be given as a resource id, a name, or a dict containing C(resource_group), C(name) and C(type).
-            - If the source type is a blob URI, the source should be the full URI of the blob as a string.
-            - If you specify I(type) in a dict, acceptable values are C(disks), C(virtual_machines) and C(snapshots).
- type: raw
- required: true
- data_disk_sources:
- description:
- - List of data disk sources, including unmanaged blob URI, managed disk id or name, or snapshot id or name.
- type: list
- location:
- description:
- - Location of the image. Derived from I(resource_group) if not specified.
- os_type:
- description: The OS type of image.
- choices:
- - Windows
- - Linux
- state:
- description:
-            - Assert the state of the image. Use C(present) to create or update an image and C(absent) to delete an image.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
-- name: Create an image from a virtual machine
- azure_rm_image:
- resource_group: myResourceGroup
- name: myImage
- source: myVirtualMachine
-
-- name: Create an image from os disk
- azure_rm_image:
- resource_group: myResourceGroup
- name: myImage
-    source: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/disks/disk001
- data_disk_sources:
- - datadisk001
- - datadisk002
- os_type: Linux
-
-- name: Create an image from os disk via dict
- azure_rm_image:
- resource_group: myResourceGroup
- name: myImage
- source:
- type: disks
- resource_group: myResourceGroup
- name: disk001
- data_disk_sources:
- - datadisk001
- - datadisk002
- os_type: Linux
-
-- name: Delete an image
- azure_rm_image:
- state: absent
- resource_group: myResourceGroup
- name: myImage
- source: testvm001
-'''
-
-RETURN = '''
-id:
- description:
- - Image resource path.
- type: str
- returned: success
-    example: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/images/myImage"
-''' # NOQA
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
-
-try:
- from msrestazure.tools import parse_resource_id
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMImage(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- location=dict(type='str'),
- source=dict(type='raw'),
- data_disk_sources=dict(type='list', default=[]),
- os_type=dict(type='str', choices=['Windows', 'Linux'])
- )
-
- self.results = dict(
- changed=False,
- id=None
- )
-
- required_if = [
- ('state', 'present', ['source'])
- ]
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.location = None
- self.source = None
- self.data_disk_sources = None
- self.os_type = None
-
- super(AzureRMImage, self).__init__(self.module_arg_spec, supports_check_mode=True, required_if=required_if)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- results = None
- changed = False
- image = None
-
- if not self.location:
- # Set default location
- resource_group = self.get_resource_group(self.resource_group)
- self.location = resource_group.location
-
- self.log('Fetching image {0}'.format(self.name))
- image = self.get_image()
- if image:
- self.check_provisioning_state(image, self.state)
- results = image.id
- # update is not supported except for tags
- update_tags, tags = self.update_tags(image.tags)
- if update_tags:
- changed = True
- self.tags = tags
- if self.state == 'absent':
- changed = True
-        # the image does not exist; create a new one
- elif self.state == 'present':
- changed = True
-
- self.results['changed'] = changed
- self.results['id'] = results
-
- if changed:
- if self.state == 'present':
- image_instance = None
- # create from virtual machine
- vm = self.get_source_vm()
- if vm:
- if self.data_disk_sources:
- self.fail('data_disk_sources is not allowed when capturing image from vm')
- image_instance = self.compute_models.Image(location=self.location,
- source_virtual_machine=self.compute_models.SubResource(id=vm.id),
- tags=self.tags)
- else:
- if not self.os_type:
- self.fail('os_type is required to create the image')
- os_disk = self.create_os_disk()
- data_disks = self.create_data_disks()
- storage_profile = self.compute_models.ImageStorageProfile(os_disk=os_disk, data_disks=data_disks)
- image_instance = self.compute_models.Image(location=self.location, storage_profile=storage_profile, tags=self.tags)
-
- # finally make the change if not check mode
- if not self.check_mode and image_instance:
- new_image = self.create_image(image_instance)
- self.results['id'] = new_image.id
-
- elif self.state == 'absent':
- if not self.check_mode:
- # delete image
- self.delete_image()
- # the delete does not actually return anything. if no exception, then we'll assume it worked.
- self.results['id'] = None
-
- return self.results
-
- def resolve_storage_source(self, source):
- blob_uri = None
- disk = None
- snapshot = None
- # blob URI can only be given by str
- if isinstance(source, str) and source.lower().endswith('.vhd'):
- blob_uri = source
- return (blob_uri, disk, snapshot)
-
- tokenize = dict()
- if isinstance(source, dict):
- tokenize = source
- elif isinstance(source, str):
- tokenize = parse_resource_id(source)
- else:
-            self.fail("source parameter should be a string or a dictionary")
- if tokenize.get('type') == 'disks':
- disk = format_resource_id(tokenize['name'],
- tokenize.get('subscription_id') or self.subscription_id,
- 'Microsoft.Compute',
- 'disks',
- tokenize.get('resource_group') or self.resource_group)
- return (blob_uri, disk, snapshot)
-
- if tokenize.get('type') == 'snapshots':
- snapshot = format_resource_id(tokenize['name'],
- tokenize.get('subscription_id') or self.subscription_id,
- 'Microsoft.Compute',
- 'snapshots',
- tokenize.get('resource_group') or self.resource_group)
- return (blob_uri, disk, snapshot)
-
- # not a disk or snapshots
- if 'type' in tokenize:
- return (blob_uri, disk, snapshot)
-
- # source can be name of snapshot or disk
- snapshot_instance = self.get_snapshot(tokenize.get('resource_group') or self.resource_group,
- tokenize['name'])
- if snapshot_instance:
- snapshot = snapshot_instance.id
- return (blob_uri, disk, snapshot)
-
- disk_instance = self.get_disk(tokenize.get('resource_group') or self.resource_group,
- tokenize['name'])
-        if disk_instance:
-            disk = disk_instance.id
-        return (blob_uri, disk, snapshot)
-
- def create_os_disk(self):
- blob_uri, disk, snapshot = self.resolve_storage_source(self.source)
- snapshot_resource = self.compute_models.SubResource(id=snapshot) if snapshot else None
- managed_disk = self.compute_models.SubResource(id=disk) if disk else None
- return self.compute_models.ImageOSDisk(os_type=self.os_type,
- os_state=self.compute_models.OperatingSystemStateTypes.generalized,
- snapshot=snapshot_resource,
- managed_disk=managed_disk,
- blob_uri=blob_uri)
-
- def create_data_disk(self, lun, source):
- blob_uri, disk, snapshot = self.resolve_storage_source(source)
- if blob_uri or disk or snapshot:
- snapshot_resource = self.compute_models.SubResource(id=snapshot) if snapshot else None
- managed_disk = self.compute_models.SubResource(id=disk) if disk else None
- return self.compute_models.ImageDataDisk(lun=lun,
- blob_uri=blob_uri,
- snapshot=snapshot_resource,
- managed_disk=managed_disk)
-
- def create_data_disks(self):
- return list(filter(None, [self.create_data_disk(lun, source) for lun, source in enumerate(self.data_disk_sources)]))
-
- def get_source_vm(self):
-        # self.source can be a vm (id/name/dict), or not a vm. Return the vm iff it is an existing vm.
- resource = dict()
- if isinstance(self.source, dict):
- if self.source.get('type') != 'virtual_machines':
- return None
- resource = dict(type='virtualMachines',
- name=self.source['name'],
- resource_group=self.source.get('resource_group') or self.resource_group)
- elif isinstance(self.source, str):
- vm_resource_id = format_resource_id(self.source,
- self.subscription_id,
- 'Microsoft.Compute',
- 'virtualMachines',
- self.resource_group)
- resource = parse_resource_id(vm_resource_id)
- else:
-            self.fail("Unsupported type for source parameter; please provide a string or a dictionary")
- return self.get_vm(resource['resource_group'], resource['name']) if resource['type'] == 'virtualMachines' else None
-
- def get_snapshot(self, resource_group, snapshot_name):
- return self._get_resource(self.compute_client.snapshots.get, resource_group, snapshot_name)
-
- def get_disk(self, resource_group, disk_name):
- return self._get_resource(self.compute_client.disks.get, resource_group, disk_name)
-
- def get_vm(self, resource_group, vm_name):
- return self._get_resource(self.compute_client.virtual_machines.get, resource_group, vm_name, 'instanceview')
-
- def get_image(self):
- return self._get_resource(self.compute_client.images.get, self.resource_group, self.name)
-
- def _get_resource(self, get_method, resource_group, name, expand=None):
- try:
- if expand:
- return get_method(resource_group, name, expand=expand)
- else:
- return get_method(resource_group, name)
- except CloudError as cloud_err:
- # Return None iff the resource is not found
- if cloud_err.status_code == 404:
- self.log('{0}'.format(str(cloud_err)))
- return None
- self.fail('Error: failed to get resource {0} - {1}'.format(name, str(cloud_err)))
- except Exception as exc:
- self.fail('Error: failed to get resource {0} - {1}'.format(name, str(exc)))
-
- def create_image(self, image):
- try:
- poller = self.compute_client.images.create_or_update(self.resource_group, self.name, image)
- new_image = self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error creating image {0} - {1}".format(self.name, str(exc)))
- self.check_provisioning_state(new_image)
- return new_image
-
- def delete_image(self):
- self.log('Deleting image {0}'.format(self.name))
- try:
- poller = self.compute_client.images.delete(self.resource_group, self.name)
- result = self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error deleting image {0} - {1}".format(self.name, str(exc)))
-
- return result
-
-
-def main():
- AzureRMImage()
-
-
-if __name__ == '__main__':
- main()
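
resolve_storage_source() in the module above accepts a blob URI string, a resource id string, a bare name, or a dict, and decides whether the OS/data disk source is a blob, a managed disk or a snapshot. A rough, self-contained sketch of that dispatch (names and return values are illustrative, not the module's API):

def classify_source(source):
    # A .vhd URI is treated as an unmanaged blob.
    if isinstance(source, str) and source.lower().endswith('.vhd'):
        return 'blob_uri'
    if isinstance(source, dict):
        kind = source.get('type')
    elif isinstance(source, str):
        # A full resource id looks like .../providers/Microsoft.Compute/<type>/<name>
        parts = source.strip('/').split('/')
        kind = parts[-2] if len(parts) >= 2 else None
    else:
        raise TypeError('source must be a string or a dict')
    if kind == 'disks':
        return 'managed_disk'
    if kind == 'snapshots':
        return 'snapshot'
    # Otherwise the module looks the bare name up as a snapshot first, then as a managed disk.
    return 'name_lookup'

print(classify_source('https://acct.blob.core.windows.net/vhds/os.vhd'))  # blob_uri
print(classify_source({'type': 'snapshots', 'name': 'snap001'}))          # snapshot
print(classify_source('disk001'))                                         # name_lookup
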
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_image_info.py b/lib/ansible/modules/cloud/azure/azure_rm_image_info.py
deleted file mode 100644
index 10a53f27d1..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_image_info.py
+++ /dev/null
@@ -1,307 +0,0 @@
-#!/usr/bin/python
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_image_info
-
-version_added: "2.9"
-
-short_description: Get facts about Azure custom images
-
-description:
-    - List Azure custom images. The listing scope can be the whole subscription, a resource group, an image name, or tags.
-
-options:
- resource_group:
- description:
- - Name of resource group.
- name:
- description:
- - Name of the image to filter from existing images.
- tags:
- description:
- - List of tags to be matched.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Madhura Naniwadekar (@Madhura-CSI)
-'''
-
-
-EXAMPLES = '''
-- name: List images with name
- azure_rm_image_info:
- name: test-image
- resource_group: myResourceGroup
-
-- name: List images by resource group
- azure_rm_image_info:
- resource_group: myResourceGroup
- tags:
- - testing
- - foo:bar
-
-- name: List all available images under current subscription
- azure_rm_image_info:
-'''
-
-
-RETURN = '''
-images:
- description:
- - List of image dicts.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Id of the image.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/images/xx
- name:
- description:
- - Name of the image.
- returned: always
- type: str
- resource_group:
- description:
- - Resource group of the image.
- returned: always
- type: str
- sample: myResourceGroup
- location:
- description:
- - Location of the image.
- returned: always
- type: str
- os_disk:
- description:
- - Id of os disk for image.
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/disks/xx
- os_disk_caching:
- description:
- - Specifies caching requirements for the image.
- returned: always
- type: str
- os_state:
- description:
- - Specifies image operating system state. Possible values are C(Generalized) or C(Specialized).
- returned: always
- type: str
- sample: Generalized
- os_storage_account_type:
- description:
- - Specifies the storage account type for the managed disk.
- type: str
- sample: Standard_LRS
- os_type:
- description:
- - Type of OS for image.
- returned: always
- type: str
- sample: Linux
- provisioning_state:
- description:
- - State of image.
- returned: always
- type: str
- sample: Succeeded
- source:
- description:
- - Resource id of source VM from which the image is created.
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/xx
- tags:
- description:
- - Dictionary of tags associated with the image.
- type: complex
- data_disks:
- description:
- - List of data disks associated with the image.
- type: complex
- returned: always
- contains:
- caching:
- description:
- - Type of caching of data disk.
- sample: read_only
- disk_size_gb:
- description:
- - Specifies the size of empty data disks in gigabytes.
- returned: always
- type: int
- sample: 50
- lun:
- description:
- - Specifies the logical unit number of the data disk.
- returned: always
- type: int
- sample: 0
- storage_account_type:
- description:
- - Specifies the storage account type for the managed disk data disk.
- type: str
- sample: Standard_LRS
- managed_disk_id:
- description:
- - Id of managed disk.
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/disks/xx
- blob_uri:
- description:
-                    - URI of the virtual hard disk (VHD) blob.
-'''
-
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-
-AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
-
-
-class AzureRMImageInfo(AzureRMModuleBase):
-
- def __init__(self, **kwargs):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str'),
- name=dict(type='str'),
- tags=dict(type='list')
- )
-
- self.results = dict(
- changed=False
- )
-
- self.resource_group = None
- self.name = None
- self.format = None
- self.tags = None
-
- super(AzureRMImageInfo, self).__init__(
- derived_arg_spec=self.module_arg_spec,
- supports_tags=False,
- facts_module=True
- )
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_image_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_image_facts' module has been renamed to 'azure_rm_image_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name and self.resource_group:
- self.results['images'] = self.get_image(self.resource_group, self.name)
- elif self.name and not self.resource_group:
- self.results['images'] = self.list_images(self.name)
- elif not self.name and self.resource_group:
- self.results['images'] = self.list_images_by_resource_group(self.resource_group)
- elif not self.name and not self.resource_group:
- self.results['images'] = self.list_images()
- return self.results
-
- def get_image(self, resource_group, image_name):
- '''
- Returns image details based on its name
- '''
-
- self.log('Get properties for {0}'.format(self.name))
-
- result = []
- item = None
- try:
- item = self.compute_client.images.get(resource_group, image_name)
- except CloudError as exc:
-            self.fail('Failed to get image - {0}'.format(str(exc)))
-
- result = [self.format_item(item)]
- return result
-
- def list_images_by_resource_group(self, resource_group):
- '''
- Returns image details based on its resource group
- '''
-
- self.log('List images filtered by resource group')
- response = None
- try:
- response = self.compute_client.images.list_by_resource_group(resource_group)
- except CloudError as exc:
- self.fail("Failed to list images: {0}".format(str(exc)))
-
- return [self.format_item(x) for x in response if self.has_tags(x.tags, self.tags)] if response else []
-
- def list_images(self, image_name=None):
- '''
- Returns image details in current subscription
- '''
-
- self.log('List images within current subscription')
- response = None
- results = []
- try:
- response = self.compute_client.images.list()
- except CloudError as exc:
- self.fail("Failed to list all images: {0}".format(str(exc)))
-
- results = [self.format_item(x) for x in response if self.has_tags(x.tags, self.tags)] if response else []
- if image_name:
- results = [result for result in results if result['name'] == image_name]
- return results
-
- def format_item(self, item):
- d = item.as_dict()
-
- for data_disk in d['storage_profile']['data_disks']:
- if 'managed_disk' in data_disk.keys():
- data_disk['managed_disk_id'] = data_disk['managed_disk']['id']
- data_disk.pop('managed_disk', None)
-
- d = {
- 'id': d['id'],
- 'resource_group': d['id'].split('/')[4],
- 'name': d['name'],
- 'location': d['location'],
- 'tags': d.get('tags'),
- 'source': d['source_virtual_machine']['id'] if 'source_virtual_machine' in d.keys() else None,
- 'os_type': d['storage_profile']['os_disk']['os_type'],
- 'os_state': d['storage_profile']['os_disk']['os_state'],
- 'os_disk_caching': d['storage_profile']['os_disk']['caching'],
- 'os_storage_account_type': d['storage_profile']['os_disk']['storage_account_type'],
- 'os_disk': d['storage_profile']['os_disk']['managed_disk']['id'] if 'managed_disk' in d['storage_profile']['os_disk'].keys() else None,
- 'os_blob_uri': d['storage_profile']['os_disk']['blob_uri'] if 'blob_uri' in d['storage_profile']['os_disk'].keys() else None,
- 'provisioning_state': d['provisioning_state'],
- 'data_disks': d['storage_profile']['data_disks']
- }
- return d
-
-
-def main():
- AzureRMImageInfo()
-
-
-if __name__ == '__main__':
- main()
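
The listing paths above all follow the same pattern: filter the SDK results by tags via has_tags() (defined in azure_rm_common, not shown in this diff) and flatten each item with format_item(). A small sketch of the assumed has_tags() semantics, where each requested entry is either 'key' or 'key:value':

def matches_tags(resource_tags, wanted):
    # Assumed semantics: every wanted entry must be present on the resource;
    # an empty or missing wanted list matches everything.
    resource_tags = resource_tags or {}
    for entry in wanted or []:
        key, _, value = entry.partition(':')
        if key not in resource_tags:
            return False
        if value and resource_tags[key] != value:
            return False
    return True

images = [{'name': 'a', 'tags': {'testing': '', 'foo': 'bar'}},
          {'name': 'b', 'tags': {'foo': 'baz'}}]
print([i['name'] for i in images if matches_tags(i['tags'], ['testing', 'foo:bar'])])  # ['a']
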
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_iotdevice.py b/lib/ansible/modules/cloud/azure/azure_rm_iotdevice.py
deleted file mode 100644
index 86698c2342..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_iotdevice.py
+++ /dev/null
@@ -1,472 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_iotdevice
-version_added: "2.9"
-short_description: Manage Azure IoT hub device
-description:
- - Create, delete an Azure IoT hub device.
-options:
- hub:
- description:
- - Name of IoT Hub.
- type: str
- required: true
- hub_policy_name:
- description:
- - Policy name of the IoT Hub which will be used to query from IoT hub.
-            - This policy should have the 'RegistryWrite', 'ServiceConnect' and 'DeviceConnect' permissions. You may get a 401 error if any of them is missing.
- type: str
- required: true
- hub_policy_key:
- description:
- - Key of the I(hub_policy_name).
- type: str
- required: true
- name:
- description:
- - Name of the IoT hub device identity.
- type: str
- required: true
- state:
- description:
-            - State of the IoT hub device. Use C(present) to create or update an IoT hub device and C(absent) to delete an IoT hub device.
- type: str
- default: present
- choices:
- - absent
- - present
- auth_method:
- description:
- - The authorization type an entity is to be created with.
- type: str
- choices:
- - sas
- - certificate_authority
- - self_signed
- default: sas
- primary_key:
- description:
- - Explicit self-signed certificate thumbprint to use for primary key.
- - Explicit Shared Private Key to use for primary key.
- type: str
- aliases:
- - primary_thumbprint
- secondary_key:
- description:
- - Explicit self-signed certificate thumbprint to use for secondary key.
- - Explicit Shared Private Key to use for secondary key.
- type: str
- aliases:
- - secondary_thumbprint
- status:
- description:
- - Set device status upon creation.
- type: bool
- edge_enabled:
- description:
- - Flag indicating edge enablement.
- - Not supported in IoT Hub with Basic tier.
- type: bool
- twin_tags:
- description:
- - A section that the solution back end can read from and write to.
- - Tags are not visible to device apps.
-            - "The tag can be a nested dictionary; '.', '$', '#' and ' ' are not allowed in keys."
- - List is not supported.
- - Not supported in IoT Hub with Basic tier.
- type: dict
- desired:
- description:
- - Used along with reported properties to synchronize device configuration or conditions.
-            - "The tag can be a nested dictionary; '.', '$', '#' and ' ' are not allowed in keys."
- - List is not supported.
- - Not supported in IoT Hub with Basic tier.
- type: dict
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
-- name: Create simplest Azure IoT Hub device
- azure_rm_iotdevice:
- hub: myHub
- name: Testing
- hub_policy_name: iothubowner
- hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
-
-- name: Create Azure IoT Edge device
- azure_rm_iotdevice:
- hub: myHub
- name: Testing
- hub_policy_name: iothubowner
- hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- edge_enabled: yes
-
-- name: Create Azure IoT Hub device with device twin properties and tag
- azure_rm_iotdevice:
- hub: myHub
- name: Testing
- hub_policy_name: iothubowner
- hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- twin_tags:
- location:
- country: US
- city: Redmond
- sensor: humidity
- desired:
- period: 100
-'''
-
-RETURN = '''
-device:
- description:
- - IoT Hub device.
- returned: always
- type: dict
- sample: {
- "authentication": {
- "symmetricKey": {
- "primaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- },
- "type": "sas",
- "x509Thumbprint": {
- "primaryThumbprint": null,
- "secondaryThumbprint": null
- }
- },
- "capabilities": {
- "iotEdge": false
- },
- "changed": true,
- "cloudToDeviceMessageCount": 0,
- "connectionState": "Disconnected",
- "connectionStateUpdatedTime": "0001-01-01T00:00:00",
- "deviceId": "Testing",
- "etag": "NzA2NjU2ODc=",
- "failed": false,
- "generationId": "636903014505613307",
- "lastActivityTime": "0001-01-01T00:00:00",
- "modules": [
- {
- "authentication": {
- "symmetricKey": {
- "primaryKey": "XXXXXXXXXXXXXXXXXXX",
- "secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- },
- "type": "sas",
- "x509Thumbprint": {
- "primaryThumbprint": null,
- "secondaryThumbprint": null
- }
- },
- "cloudToDeviceMessageCount": 0,
- "connectionState": "Disconnected",
- "connectionStateUpdatedTime": "0001-01-01T00:00:00",
- "deviceId": "testdevice",
- "etag": "MjgxOTE5ODE4",
- "generationId": "636903840872788074",
- "lastActivityTime": "0001-01-01T00:00:00",
- "managedBy": null,
- "moduleId": "test"
- }
- ],
- "properties": {
- "desired": {
- "$metadata": {
- "$lastUpdated": "2019-04-10T05:00:46.2702079Z",
- "$lastUpdatedVersion": 8,
- "period": {
- "$lastUpdated": "2019-04-10T05:00:46.2702079Z",
- "$lastUpdatedVersion": 8
- }
- },
- "$version": 1,
- "period": 100
- },
- "reported": {
- "$metadata": {
- "$lastUpdated": "2019-04-08T06:24:10.5613307Z"
- },
- "$version": 1
- }
- },
- "status": "enabled",
- "statusReason": null,
- "statusUpdatedTime": "0001-01-01T00:00:00",
- "tags": {
- "location": {
- "country": "us",
- "city": "Redmond"
- },
- "sensor": "humidity"
- }
- }
-''' # NOQA
-
-import json
-import copy
-import re
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
-from ansible.module_utils.common.dict_transformations import _snake_to_camel
-
-try:
- from msrestazure.tools import parse_resource_id
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMIoTDevice(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str', required=True),
- hub_policy_name=dict(type='str', required=True),
- hub_policy_key=dict(type='str', required=True),
- hub=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- status=dict(type='bool'),
- edge_enabled=dict(type='bool'),
- twin_tags=dict(type='dict'),
- desired=dict(type='dict'),
- auth_method=dict(type='str', choices=['self_signed', 'sas', 'certificate_authority'], default='sas'),
- primary_key=dict(type='str', no_log=True, aliases=['primary_thumbprint']),
- secondary_key=dict(type='str', no_log=True, aliases=['secondary_thumbprint'])
- )
-
- self.results = dict(
- changed=False,
- id=None
- )
-
- self.name = None
- self.hub = None
- self.hub_policy_key = None
- self.hub_policy_name = None
- self.state = None
- self.status = None
- self.edge_enabled = None
- self.twin_tags = None
- self.desired = None
- self.auth_method = None
- self.primary_key = None
- self.secondary_key = None
-
- required_if = [
- ['auth_method', 'self_signed', ['certificate_authority']]
- ]
-
- self._base_url = None
- self._mgmt_client = None
- self.query_parameters = {
- 'api-version': '2018-06-30'
- }
- self.header_parameters = {
- 'Content-Type': 'application/json; charset=utf-8',
- 'accept-language': 'en-US'
- }
- super(AzureRMIoTDevice, self).__init__(self.module_arg_spec, supports_check_mode=True, required_if=required_if)
-
- def exec_module(self, **kwargs):
-
- for key in self.module_arg_spec.keys():
- setattr(self, key, kwargs[key])
-
- self._base_url = '{0}.azure-devices.net'.format(self.hub)
- config = {
- 'base_url': self._base_url,
- 'key': self.hub_policy_key,
- 'policy': self.hub_policy_name
- }
- self._mgmt_client = self.get_data_svc_client(**config)
-
- changed = False
-
- device = self.get_device()
- if self.state == 'present':
- if not device:
- changed = True
- auth = {'type': _snake_to_camel(self.auth_method)}
- if self.auth_method == 'self_signed':
- auth['x509Thumbprint'] = {
- 'primaryThumbprint': self.primary_key,
- 'secondaryThumbprint': self.secondary_key
- }
- elif self.auth_method == 'sas':
- auth['symmetricKey'] = {
- 'primaryKey': self.primary_key,
- 'secondaryKey': self.secondary_key
- }
- device = {
- 'deviceId': self.name,
- 'capabilities': {'iotEdge': self.edge_enabled or False},
- 'authentication': auth
- }
- if self.status is not None and not self.status:
- device['status'] = 'disabled'
- else:
- if self.edge_enabled is not None and self.edge_enabled != device['capabilities']['iotEdge']:
- changed = True
- device['capabilities']['iotEdge'] = self.edge_enabled
- if self.status is not None:
- status = 'enabled' if self.status else 'disabled'
- if status != device['status']:
- changed = True
- device['status'] = status
- if changed and not self.check_mode:
- device = self.create_or_update_device(device)
- twin = self.get_twin()
- if twin:
- if not twin.get('tags'):
- twin['tags'] = dict()
- twin_change = False
- if self.twin_tags and not self.is_equal(self.twin_tags, twin['tags']):
- twin_change = True
- if self.desired and not self.is_equal(self.desired, twin['properties']['desired']):
- twin_change = True
- if twin_change and not self.check_mode:
- self.update_twin(twin)
- changed = changed or twin_change
- device['tags'] = twin.get('tags') or dict()
- device['properties'] = twin['properties']
- device['modules'] = self.list_device_modules()
- elif self.twin_tags or self.desired:
- self.fail("Device twin is not supported in IoT Hub with basic tier.")
- elif device:
- if not self.check_mode:
- self.delete_device(device['etag'])
- changed = True
- device = None
- self.results = device or dict()
- self.results['changed'] = changed
- return self.results
-
- def is_equal(self, updated, original):
- changed = False
- if not isinstance(updated, dict):
- self.fail('The Property or Tag should be a dict')
- for key in updated.keys():
- if re.search(r'[.|$|#|\s]', key):
- self.fail("Property or Tag name has invalid characters: '.', '$', '#' or ' '. Got '{0}'".format(key))
- original_value = original.get(key)
- updated_value = updated[key]
- if isinstance(updated_value, dict):
- if not isinstance(original_value, dict):
- changed = True
- original[key] = updated_value
- elif not self.is_equal(updated_value, original_value):
- changed = True
- elif original_value != updated_value:
- changed = True
- original[key] = updated_value
- return not changed
-
- def create_or_update_device(self, device):
- try:
- url = '/devices/{0}'.format(self.name)
- headers = copy.copy(self.header_parameters)
- if device.get('etag'):
- headers['If-Match'] = '"{0}"'.format(device['etag'])
- request = self._mgmt_client.put(url, self.query_parameters)
- response = self._mgmt_client.send(request=request, headers=headers, content=device)
- if response.status_code not in [200, 201, 202]:
- raise CloudError(response)
- return json.loads(response.text)
- except Exception as exc:
- if exc.status_code in [403] and self.edge_enabled:
- self.fail('Edge device is not supported in IoT Hub with Basic tier.')
- else:
- self.fail('Error when creating or updating IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc)))
-
- def delete_device(self, etag):
- try:
- url = '/devices/{0}'.format(self.name)
- headers = copy.copy(self.header_parameters)
- headers['If-Match'] = '"{0}"'.format(etag)
- request = self._mgmt_client.delete(url, self.query_parameters)
- response = self._mgmt_client.send(request=request, headers=headers)
- if response.status_code not in [204]:
- raise CloudError(response)
- except Exception as exc:
- self.fail('Error when deleting IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc)))
-
- def get_device(self):
- try:
- url = '/devices/{0}'.format(self.name)
- device = self._https_get(url, self.query_parameters, self.header_parameters)
- return device
- except Exception as exc:
- if exc.status_code in [404]:
- return None
- else:
- self.fail('Error when getting IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc)))
-
- def get_twin(self):
- try:
- url = '/twins/{0}'.format(self.name)
- return self._https_get(url, self.query_parameters, self.header_parameters)
- except Exception as exc:
- if exc.status_code in [403]:
-                # The Basic SKU does not support device twins
- return None
- else:
- self.fail('Error when getting IoT Hub device {0} twin: {1}'.format(self.name, exc.message or str(exc)))
-
- def update_twin(self, twin):
- try:
- url = '/twins/{0}'.format(self.name)
- headers = copy.copy(self.header_parameters)
- headers['If-Match'] = '"{0}"'.format(twin['etag'])
- request = self._mgmt_client.patch(url, self.query_parameters)
- response = self._mgmt_client.send(request=request, headers=headers, content=twin)
- if response.status_code not in [200]:
- raise CloudError(response)
- return json.loads(response.text)
- except Exception as exc:
- self.fail('Error when creating or updating IoT Hub device twin {0}: {1}'.format(self.name, exc.message or str(exc)))
-
- def list_device_modules(self):
- try:
- url = '/devices/{0}/modules'.format(self.name)
- return self._https_get(url, self.query_parameters, self.header_parameters)
- except Exception as exc:
- self.fail('Error when listing IoT Hub device {0} modules: {1}'.format(self.name, exc.message or str(exc)))
-
- def _https_get(self, url, query_parameters, header_parameters):
- request = self._mgmt_client.get(url, query_parameters)
- response = self._mgmt_client.send(request=request, headers=header_parameters, content=None)
- if response.status_code not in [200]:
- raise CloudError(response)
- return json.loads(response.text)
-
-
-def main():
- AzureRMIoTDevice()
-
-
-if __name__ == '__main__':
- main()
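
The twin handling above hinges on is_equal(), which both compares the requested twin_tags/desired values against the current twin and copies any differing values into it, so that the mutated twin can be PATCHed back. A standalone adaptation of that merge-and-compare step (fail() replaced with a plain exception):

import re

def is_equal(updated, original):
    # Returns True when `updated` is already contained in `original`;
    # otherwise merges the differing values into `original` and returns False.
    changed = False
    for key, updated_value in updated.items():
        if re.search(r'[.$#\s]', key):
            raise ValueError("invalid character in key: {0}".format(key))
        original_value = original.get(key)
        if isinstance(updated_value, dict):
            if not isinstance(original_value, dict):
                changed = True
                original[key] = updated_value
            elif not is_equal(updated_value, original_value):
                changed = True
        elif original_value != updated_value:
            changed = True
            original[key] = updated_value
    return not changed

twin_tags = {'location': {'country': 'US'}}
wanted = {'location': {'country': 'US', 'city': 'Redmond'}}
print(is_equal(wanted, twin_tags))  # False -> the twin needs a PATCH
print(twin_tags)                    # {'location': {'country': 'US', 'city': 'Redmond'}}
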
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_iotdevice_info.py b/lib/ansible/modules/cloud/azure/azure_rm_iotdevice_info.py
deleted file mode 100644
index a283b68bb5..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_iotdevice_info.py
+++ /dev/null
@@ -1,313 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_iotdevice_info
-version_added: "2.9"
-short_description: Get facts about Azure IoT Hub devices
-description:
-    - Query and get Azure IoT Hub devices.
-options:
- hub:
- description:
- - Name of IoT Hub.
- type: str
- required: true
- hub_policy_name:
- description:
- - Policy name of the IoT Hub which will be used to query from IoT hub.
- - This policy should have at least 'Registry Read' access.
- type: str
- required: true
- hub_policy_key:
- description:
- - Key of the I(hub_policy_name).
- type: str
- required: true
- name:
- description:
- - Name of the IoT hub device identity.
- type: str
- aliases:
- - device_id
- module_id:
- description:
- - Name of the IoT hub device module.
-            - Must be used with I(device_id) defined.
- type: str
- query:
- description:
- - Query an IoT hub to retrieve information regarding device twins using a SQL-like language.
- - "See U(https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-query-language)."
- type: str
- top:
- description:
-            - Used when I(name) is not defined.
- - List the top n devices in the query.
- type: int
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yuwei Zhou (@yuwzho)
-'''
-
-EXAMPLES = '''
-- name: Get the details of a device
- azure_rm_iotdevice_info:
- name: Testing
- hub: MyIoTHub
- hub_policy_name: registryRead
- hub_policy_key: XXXXXXXXXXXXXXXXXXXX
-
-- name: Query all device modules in an IoT Hub
- azure_rm_iotdevice_info:
- query: "SELECT * FROM devices.modules"
- hub: MyIoTHub
- hub_policy_name: registryRead
- hub_policy_key: XXXXXXXXXXXXXXXXXXXX
-
-- name: List all devices in an IoT Hub
- azure_rm_iotdevice_info:
- hub: MyIoTHub
- hub_policy_name: registryRead
- hub_policy_key: XXXXXXXXXXXXXXXXXXXX
-'''
-
-RETURN = '''
-iot_devices:
- description:
-        - List of IoT Hub devices.
-    returned: always
-    type: list
- sample: {
- "authentication": {
- "symmetricKey": {
- "primaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- },
- "type": "sas",
- "x509Thumbprint": {
- "primaryThumbprint": null,
- "secondaryThumbprint": null
- }
- },
- "capabilities": {
- "iotEdge": false
- },
- "changed": true,
- "cloudToDeviceMessageCount": 0,
- "connectionState": "Disconnected",
- "connectionStateUpdatedTime": "0001-01-01T00:00:00",
- "deviceId": "Testing",
- "etag": "NzA2NjU2ODc=",
- "failed": false,
- "generationId": "636903014505613307",
- "lastActivityTime": "0001-01-01T00:00:00",
- "modules": [
- {
- "authentication": {
- "symmetricKey": {
- "primaryKey": "XXXXXXXXXXXXXXXXXXX",
- "secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- },
- "type": "sas",
- "x509Thumbprint": {
- "primaryThumbprint": null,
- "secondaryThumbprint": null
- }
- },
- "cloudToDeviceMessageCount": 0,
- "connectionState": "Disconnected",
- "connectionStateUpdatedTime": "0001-01-01T00:00:00",
- "deviceId": "testdevice",
- "etag": "MjgxOTE5ODE4",
- "generationId": "636903840872788074",
- "lastActivityTime": "0001-01-01T00:00:00",
- "managedBy": null,
- "moduleId": "test"
- }
- ],
- "properties": {
- "desired": {
- "$metadata": {
- "$lastUpdated": "2019-04-10T05:00:46.2702079Z",
- "$lastUpdatedVersion": 8,
- "period": {
- "$lastUpdated": "2019-04-10T05:00:46.2702079Z",
- "$lastUpdatedVersion": 8
- }
- },
- "$version": 1,
- "period": 100
- },
- "reported": {
- "$metadata": {
- "$lastUpdated": "2019-04-08T06:24:10.5613307Z"
- },
- "$version": 1
- }
- },
- "status": "enabled",
- "statusReason": null,
- "statusUpdatedTime": "0001-01-01T00:00:00",
- "tags": {
- "location": {
- "country": "us",
- "city": "Redmond"
- },
- "sensor": "humidity"
- }
- }
-''' # NOQA
-
-import json
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
-from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
-
-try:
- from msrestazure.tools import parse_resource_id
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMIoTDeviceFacts(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str', aliases=['device_id']),
- module_id=dict(type='str'),
- query=dict(type='str'),
- hub=dict(type='str', required=True),
- hub_policy_name=dict(type='str', required=True),
- hub_policy_key=dict(type='str', required=True),
- top=dict(type='int')
- )
-
- self.results = dict(
- changed=False,
- iot_devices=[]
- )
-
- self.name = None
- self.module_id = None
- self.hub = None
- self.hub_policy_name = None
- self.hub_policy_key = None
- self.top = None
-
- self._mgmt_client = None
- self._base_url = None
- self.query_parameters = {
- 'api-version': '2018-06-30'
- }
- self.header_parameters = {
- 'Content-Type': 'application/json; charset=utf-8',
- 'accept-language': 'en-US'
- }
- super(AzureRMIoTDeviceFacts, self).__init__(self.module_arg_spec, supports_check_mode=True)
-
- def exec_module(self, **kwargs):
-
- for key in self.module_arg_spec.keys():
- setattr(self, key, kwargs[key])
-
- self._base_url = '{0}.azure-devices.net'.format(self.hub)
- config = {
- 'base_url': self._base_url,
- 'key': self.hub_policy_key,
- 'policy': self.hub_policy_name
- }
- if self.top:
- self.query_parameters['top'] = self.top
- self._mgmt_client = self.get_data_svc_client(**config)
-
- response = []
- if self.module_id:
- response = [self.get_device_module()]
- elif self.name:
- response = [self.get_device()]
- elif self.query:
- response = self.hub_query()
- else:
- response = self.list_devices()
-
- self.results['iot_devices'] = response
- return self.results
-
- def hub_query(self):
- try:
- url = '/devices/query'
- request = self._mgmt_client.post(url, self.query_parameters)
- query = {
- 'query': self.query
- }
- response = self._mgmt_client.send(request=request, headers=self.header_parameters, content=query)
- if response.status_code not in [200]:
- raise CloudError(response)
- return json.loads(response.text)
- except Exception as exc:
- self.fail('Error when running query "{0}" in IoT Hub {1}: {2}'.format(self.query, self.hub, exc.message or str(exc)))
-
- def get_device(self):
- try:
- url = '/devices/{0}'.format(self.name)
- device = self._https_get(url, self.query_parameters, self.header_parameters)
- device['modules'] = self.list_device_modules()
- return device
- except Exception as exc:
- self.fail('Error when getting IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc)))
-
- def get_device_module(self):
- try:
- url = '/devices/{0}/modules/{1}'.format(self.name, self.module_id)
- return self._https_get(url, self.query_parameters, self.header_parameters)
- except Exception as exc:
- self.fail('Error when getting IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc)))
-
- def list_device_modules(self):
- try:
- url = '/devices/{0}/modules'.format(self.name)
- return self._https_get(url, self.query_parameters, self.header_parameters)
- except Exception as exc:
-            self.fail('Error when listing IoT Hub device {0} modules: {1}'.format(self.name, exc.message or str(exc)))
-
- def list_devices(self):
- try:
- url = '/devices'
- return self._https_get(url, self.query_parameters, self.header_parameters)
- except Exception as exc:
- self.fail('Error when listing IoT Hub devices in {0}: {1}'.format(self.hub, exc.message or str(exc)))
-
- def _https_get(self, url, query_parameters, header_parameters):
- request = self._mgmt_client.get(url, query_parameters)
- response = self._mgmt_client.send(request=request, headers=header_parameters, content=None)
- if response.status_code not in [200]:
- raise CloudError(response)
- return json.loads(response.text)
-
-
-def main():
- AzureRMIoTDeviceFacts()
-
-
-if __name__ == '__main__':
- main()
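
exec_module() above picks one of four registry calls depending on which parameters were supplied. A compact restatement of that dispatch (illustrative only; the URL strings mirror the ones built in the module):

def pick_operation(name=None, module_id=None, query=None):
    # Branch order matches exec_module(): module_id wins, then name,
    # then a free-form twin query, and finally a plain device listing.
    if module_id:
        return 'GET /devices/{0}/modules/{1}'.format(name, module_id)
    if name:
        return 'GET /devices/{0}'.format(name)
    if query:
        return 'POST /devices/query'
    return 'GET /devices'

print(pick_operation(name='Testing'))                         # GET /devices/Testing
print(pick_operation(name='Testing', module_id='test'))       # GET /devices/Testing/modules/test
print(pick_operation(query='SELECT * FROM devices.modules'))  # POST /devices/query
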
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_iotdevicemodule.py b/lib/ansible/modules/cloud/azure/azure_rm_iotdevicemodule.py
deleted file mode 100644
index 19755719e9..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_iotdevicemodule.py
+++ /dev/null
@@ -1,378 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_iotdevicemodule
-version_added: "2.9"
-short_description: Manage Azure IoT hub device module
-description:
- - Create, delete an Azure IoT hub device module.
-options:
- hub:
- description:
- - Name of IoT Hub.
- type: str
- required: true
- hub_policy_name:
- description:
- - Policy name of the IoT Hub which will be used to query from IoT hub.
- - This policy should have at least 'Registry Read' access.
- type: str
- required: true
- hub_policy_key:
- description:
- - Key of the I(hub_policy_name).
- type: str
- required: true
- name:
- description:
- - Name of the IoT hub device identity.
- type: str
- required: true
- device:
- description:
-            - Name of the device that the module is associated with.
- required: true
- type: str
- state:
- description:
-            - State of the IoT hub device module. Use C(present) to create or update a device module and C(absent) to delete it.
- type: str
- default: present
- choices:
- - absent
- - present
- auth_method:
- description:
- - The authorization type an entity is to be created with.
- type: str
- choices:
- - sas
- - certificate_authority
- - self_signed
- default: sas
- primary_key:
- description:
- - Explicit self-signed certificate thumbprint to use for primary key.
- - Explicit Shared Private Key to use for primary key.
- type: str
- aliases:
- - primary_thumbprint
- secondary_key:
- description:
- - Explicit self-signed certificate thumbprint to use for secondary key.
- - Explicit Shared Private Key to use for secondary key.
- type: str
- aliases:
- - secondary_thumbprint
- twin_tags:
- description:
- - A section that the solution back end can read from and write to.
- - Tags are not visible to device apps.
-            - "The tag can be a nested dictionary; '.', '$', '#' and ' ' are not allowed in keys."
- - List is not supported.
- type: dict
- desired:
- description:
- - Used along with reported properties to synchronize device configuration or conditions.
-            - "The tag can be a nested dictionary; '.', '$', '#' and ' ' are not allowed in keys."
- - List is not supported.
- type: dict
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
-- name: Create simplest Azure IoT Hub device module
- azure_rm_iotdevicemodule:
- hub: myHub
- name: Testing
- device: mydevice
- hub_policy_name: iothubowner
- hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
-
-- name: Create Azure IoT Edge device module
-  azure_rm_iotdevicemodule:
-    hub: myHub
-    device: mydevice
-    name: Testing
-    hub_policy_name: iothubowner
-    hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
-
-- name: Create Azure IoT Hub device module with module twin properties and tag
-  azure_rm_iotdevicemodule:
- hub: myHub
- name: Testing
- device: mydevice
- hub_policy_name: iothubowner
- hub_policy_key: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- twin_tags:
- location:
- country: US
- city: Redmond
- sensor: humidity
- desired:
- period: 100
-'''
-
-RETURN = '''
-module:
- description:
-        - IoT Hub device module.
- returned: always
- type: dict
- sample: {
- "authentication": {
- "symmetricKey": {
- "primaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
- "secondaryKey": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
- },
- "type": "sas",
- "x509Thumbprint": {
- "primaryThumbprint": null,
- "secondaryThumbprint": null
- }
- },
- "cloudToDeviceMessageCount": 0,
- "connectionState": "Disconnected",
- "connectionStateUpdatedTime": "0001-01-01T00:00:00",
- "deviceId": "mydevice",
- "etag": "ODM2NjI3ODg=",
- "generationId": "636904759703045768",
- "lastActivityTime": "0001-01-01T00:00:00",
- "managedBy": null,
- "moduleId": "Testing"
- }
-''' # NOQA
-
-import json
-import copy
-import re
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
-from ansible.module_utils.common.dict_transformations import _snake_to_camel
-
-try:
- from msrestazure.tools import parse_resource_id
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMIoTDeviceModule(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str', required=True),
- hub_policy_name=dict(type='str', required=True),
- hub_policy_key=dict(type='str', required=True),
- hub=dict(type='str', required=True),
- device=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- twin_tags=dict(type='dict'),
- desired=dict(type='dict'),
- auth_method=dict(type='str', choices=['self_signed', 'sas', 'certificate_authority'], default='sas'),
- primary_key=dict(type='str', no_log=True, aliases=['primary_thumbprint']),
- secondary_key=dict(type='str', no_log=True, aliases=['secondary_thumbprint'])
- )
-
- self.results = dict(
- changed=False,
- id=None
- )
-
- self.name = None
- self.hub = None
- self.device = None
- self.hub_policy_key = None
- self.hub_policy_name = None
- self.state = None
- self.twin_tags = None
- self.desired = None
- self.auth_method = None
- self.primary_key = None
- self.secondary_key = None
-
- required_if = [
- ['auth_method', 'self_signed', ['certificate_authority']]
- ]
-
- self._base_url = None
- self._mgmt_client = None
- self.query_parameters = {
- 'api-version': '2018-06-30'
- }
- self.header_parameters = {
- 'Content-Type': 'application/json; charset=utf-8',
- 'accept-language': 'en-US'
- }
- super(AzureRMIoTDeviceModule, self).__init__(self.module_arg_spec, supports_check_mode=True, required_if=required_if)
-
- def exec_module(self, **kwargs):
-
- for key in self.module_arg_spec.keys():
- setattr(self, key, kwargs[key])
-
- self._base_url = '{0}.azure-devices.net'.format(self.hub)
- config = {
- 'base_url': self._base_url,
- 'key': self.hub_policy_key,
- 'policy': self.hub_policy_name
- }
- self._mgmt_client = self.get_data_svc_client(**config)
-
- changed = False
-
- module = self.get_module()
- if self.state == 'present':
- if not module:
- changed = True
- auth = {'type': _snake_to_camel(self.auth_method)}
- if self.auth_method == 'self_signed':
- auth['x509Thumbprint'] = {
- 'primaryThumbprint': self.primary_key,
- 'secondaryThumbprint': self.secondary_key
- }
- elif self.auth_method == 'sas':
- auth['symmetricKey'] = {
- 'primaryKey': self.primary_key,
- 'secondaryKey': self.secondary_key
- }
- module = {
- 'deviceId': self.device,
- 'moduleId': self.name,
- 'authentication': auth
- }
- if changed and not self.check_mode:
- module = self.create_or_update_module(module)
- twin = self.get_twin()
- if not twin.get('tags'):
- twin['tags'] = dict()
- twin_change = False
- if self.twin_tags and not self.is_equal(self.twin_tags, twin['tags']):
- twin_change = True
- if self.desired and not self.is_equal(self.desired, twin['properties']['desired']):
- self.module.warn('desired')
- twin_change = True
- if twin_change and not self.check_mode:
- twin = self.update_twin(twin)
- changed = changed or twin_change
- module['tags'] = twin.get('tags') or dict()
- module['properties'] = twin['properties']
- elif module:
- if not self.check_mode:
- self.delete_module(module['etag'])
- changed = True
- module = None
- self.results = module or dict()
- self.results['changed'] = changed
- return self.results
-
- def is_equal(self, updated, original):
- changed = False
- if not isinstance(updated, dict):
- self.fail('The Property or Tag should be a dict')
- for key in updated.keys():
- if re.search(r'[.|$|#|\s]', key):
- self.fail("Property or Tag name has invalid characters: '.', '$', '#' or ' '. Got '{0}'".format(key))
- original_value = original.get(key)
- updated_value = updated[key]
- if isinstance(updated_value, dict):
- if not isinstance(original_value, dict):
- changed = True
- original[key] = updated_value
- elif not self.is_equal(updated_value, original_value):
- changed = True
- elif original_value != updated_value:
- changed = True
- original[key] = updated_value
- return not changed
-
- def create_or_update_module(self, module):
- try:
- url = '/devices/{0}/modules/{1}'.format(self.device, self.name)
- headers = copy.copy(self.header_parameters)
- if module.get('etag'):
- headers['If-Match'] = '"{0}"'.format(module['etag'])
- request = self._mgmt_client.put(url, self.query_parameters)
- response = self._mgmt_client.send(request=request, headers=headers, content=module)
- if response.status_code not in [200, 201]:
- raise CloudError(response)
- return json.loads(response.text)
- except Exception as exc:
- self.fail('Error when creating or updating IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc)))
-
- def delete_module(self, etag):
- try:
- url = '/devices/{0}/modules/{1}'.format(self.device, self.name)
- headers = copy.copy(self.header_parameters)
- headers['If-Match'] = '"{0}"'.format(etag)
- request = self._mgmt_client.delete(url, self.query_parameters)
- response = self._mgmt_client.send(request=request, headers=headers)
- if response.status_code not in [204]:
- raise CloudError(response)
- except Exception as exc:
- self.fail('Error when deleting IoT Hub device {0}: {1}'.format(self.name, exc.message or str(exc)))
-
- def get_module(self):
- try:
- url = '/devices/{0}/modules/{1}'.format(self.device, self.name)
- return self._https_get(url, self.query_parameters, self.header_parameters)
- except Exception:
- return None
-
- def get_twin(self):
- try:
- url = '/twins/{0}/modules/{1}'.format(self.device, self.name)
- return self._https_get(url, self.query_parameters, self.header_parameters)
- except Exception as exc:
- self.fail('Error when getting IoT Hub device {0} module twin {1}: {2}'.format(self.device, self.name, exc.message or str(exc)))
-
- def update_twin(self, twin):
- try:
- url = '/twins/{0}/modules/{1}'.format(self.device, self.name)
- headers = copy.copy(self.header_parameters)
- headers['If-Match'] = twin['etag']
- request = self._mgmt_client.patch(url, self.query_parameters)
- response = self._mgmt_client.send(request=request, headers=headers, content=twin)
- if response.status_code not in [200]:
- raise CloudError(response)
- return json.loads(response.text)
- except Exception as exc:
- self.fail('Error when creating or updating IoT Hub device {0} module twin {1}: {2}'.format(self.device, self.name, exc.message or str(exc)))
-
- def _https_get(self, url, query_parameters, header_parameters):
- request = self._mgmt_client.get(url, query_parameters)
- response = self._mgmt_client.send(request=request, headers=header_parameters, content=None)
- if response.status_code not in [200]:
- raise CloudError(response)
- return json.loads(response.text)
-
-
-def main():
- AzureRMIoTDeviceModule()
-
-
-if __name__ == '__main__':
- main()
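
The write paths above (create_or_update_module, update_twin, delete_module) send the current etag in an If-Match header so the service rejects updates made against stale state. A minimal sketch of that header construction (the helper name is illustrative; the quoted-etag form mirrors the create/delete paths):

def concurrency_headers(etag=None):
    # Base headers used by the module, plus optimistic-concurrency If-Match
    # when an etag from a previous GET is available.
    headers = {'Content-Type': 'application/json; charset=utf-8',
               'accept-language': 'en-US'}
    if etag:
        headers['If-Match'] = '"{0}"'.format(etag)
    return headers

print(concurrency_headers('ODM2NjI3ODg='))
# {'Content-Type': 'application/json; charset=utf-8', 'accept-language': 'en-US', 'If-Match': '"ODM2NjI3ODg="'}
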
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_iothub.py b/lib/ansible/modules/cloud/azure/azure_rm_iothub.py
deleted file mode 100644
index 56684afff8..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_iothub.py
+++ /dev/null
@@ -1,895 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_iothub
-version_added: "2.9"
-short_description: Manage Azure IoT hub
-description:
- - Create, delete an Azure IoT hub.
-options:
- resource_group:
- description:
- - Name of resource group.
- type: str
- required: true
- name:
- description:
- - Name of the IoT hub.
- type: str
- required: true
- state:
- description:
- - State of the IoT hub. Use C(present) to create or update an IoT hub and C(absent) to delete an IoT hub.
- type: str
- default: present
- choices:
- - absent
- - present
- location:
- description:
- - Location of the IoT hub.
- type: str
- sku:
- description:
- - Pricing tier for Azure IoT Hub.
-            - Note that only one free IoT hub instance is allowed in each subscription. An exception will be thrown if the free instance count exceeds one.
-            - Default is C(s1) on creation.
- type: str
- choices:
- - b1
- - b2
- - b3
- - f1
- - s1
- - s2
- - s3
- unit:
- description:
- - Units in your IoT Hub.
- - Default is C(1).
- type: int
- event_endpoint:
- description:
- - The Event Hub-compatible endpoint property.
- type: dict
- suboptions:
- partition_count:
- description:
- - The number of partitions for receiving device-to-cloud messages in the Event Hub-compatible endpoint.
- - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)."
- - Default is C(2).
- type: int
- retention_time_in_days:
- description:
- - The retention time for device-to-cloud messages in days.
- - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)."
- - Default is C(1).
- type: int
- enable_file_upload_notifications:
- description:
- - File upload notifications are enabled if set to C(True).
- type: bool
- ip_filters:
- description:
- - Configure rules for rejecting or accepting traffic from specific IPv4 addresses.
- type: list
- suboptions:
- name:
- description:
- - Name of the filter.
- type: str
- required: yes
- ip_mask:
- description:
- - A string that contains the IP address range in CIDR notation for the rule.
- type: str
- required: yes
- action:
- description:
- - The desired action for requests captured by this rule.
- type: str
- required: yes
- choices:
- - accept
- - reject
- routing_endpoints:
- description:
- - Custom endpoints.
- type: list
- suboptions:
- name:
- description:
- - Name of the custom endpoint.
- type: str
- required: yes
- resource_group:
- description:
- - Resource group of the endpoint.
- - Default is the same as I(resource_group).
- type: str
- subscription:
- description:
- - Subscription id of the endpoint.
- - Default is the same as I(subscription).
- type: str
- resource_type:
- description:
- - Resource type of the custom endpoint.
- type: str
- choices:
- - eventhub
- - queue
- - storage
- - topic
- required: yes
- connection_string:
- description:
- - Connection string of the custom endpoint.
- - The connection string should have send privilege.
- type: str
- required: yes
- container:
- description:
- - Container name of the custom endpoint when I(resource_type=storage).
- type: str
- encoding:
- description:
- - Encoding of the message when I(resource_type=storage).
- type: str
- routes:
- description:
- - Route device-to-cloud messages to service-facing endpoints.
- type: list
- suboptions:
- name:
- description:
- - Name of the route.
- type: str
- required: yes
- source:
- description:
- - The origin of the data stream to be acted upon.
- type: str
- choices:
- - device_messages
- - twin_change_events
- - device_lifecycle_events
- - device_job_lifecycle_events
- required: yes
- enabled:
- description:
- - Whether to enable the route.
- type: bool
- required: yes
- endpoint_name:
- description:
- - The name of the endpoint in I(routing_endpoints) where IoT Hub sends messages that match the query.
- type: str
- required: yes
- condition:
- description:
- - "The query expression for the routing query that is run against the message application properties,
- system properties, message body, device twin tags, and device twin properties to determine if it is a match for the endpoint."
- - "For more information about constructing a query,
- see U(https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-routing-query-syntax)"
- type: str
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
-- name: Create a simple IoT hub
- azure_rm_iothub:
- name: Testing
- resource_group: myResourceGroup
-- name: Create an IoT hub with route
- azure_rm_iothub:
- resource_group: myResourceGroup
- name: Testing
- routing_endpoints:
- - connection_string: "Endpoint=sb://qux.servicebus.windows.net/;SharedAccessKeyName=quux;SharedAccessKey=****;EntityPath=myQueue"
- name: foo
- resource_type: queue
- resource_group: myResourceGroup1
- routes:
- - name: bar
- source: device_messages
- endpoint_name: foo
- enabled: yes
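-# Illustrative example only: the hub name, resource group and IP range below are placeholders.
-- name: Create an IoT hub that accepts traffic from a single IP range only
- azure_rm_iothub:
- resource_group: myResourceGroup
- name: Testing
- sku: s1
- ip_filters:
- - name: filter1
- ip_mask: 40.54.7.3/32
- action: accept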
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID of the IoT hub.
- sample: "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/myResourceGroup/providers/Microsoft.Devices/IotHubs/Testing"
- returned: success
- type: str
-name:
- description:
- - Name of the IoT hub.
- sample: Testing
- returned: success
- type: str
-resource_group:
- description:
- - Resource group of the IoT hub.
- sample: myResourceGroup
- returned: success
- type: str
-location:
- description:
- - Location of the IoT hub.
- sample: eastus
- returned: success
- type: str
-unit:
- description:
- - Units in the IoT Hub.
- sample: 1
- returned: success
- type: int
-sku:
- description:
- - Pricing tier for Azure IoT Hub.
- sample: f1
- returned: success
- type: str
-cloud_to_device:
- description:
- - Cloud to device message properties.
- contains:
- max_delivery_count:
- description:
- - The number of times the IoT hub attempts to deliver a message on the feedback queue.
- - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages)."
- type: int
- returned: success
- sample: 10
- ttl_as_iso8601:
- description:
- - The period of time for which a message is available to consume before it is expired by the IoT hub.
- - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages)."
- type: str
- returned: success
- sample: "1:00:00"
- returned: success
- type: complex
-enable_file_upload_notifications:
- description:
- - Whether file upload notifications are enabled.
- sample: True
- returned: success
- type: bool
-event_endpoints:
- description:
- - The built-in endpoint where device messages are delivered.
- contains:
- endpoint:
- description:
- - The Event Hub-compatible endpoint.
- type: str
- returned: success
- sample: "sb://iothub-ns-testing-1478811-9bbc4a15f0.servicebus.windows.net/"
- partition_count:
- description:
- - The number of partitions for receiving device-to-cloud messages in the Event Hub-compatible endpoint.
- - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)."
- type: int
- returned: success
- sample: 2
- retention_time_in_days:
- description:
- - The retention time for device-to-cloud messages in days.
- - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)."
- type: int
- returned: success
- sample: 1
- partition_ids:
- description:
- - List of the partition id for the event endpoint.
- type: list
- returned: success
- sample: ["0", "1"]
- returned: success
- type: complex
-host_name:
- description:
- - Host of the IoT hub.
- sample: "testing.azure-devices.net"
- returned: success
- type: str
-ip_filters:
- description:
- - Configure rules for rejecting or accepting traffic from specific IPv4 addresses.
- contains:
- name:
- description:
- - Name of the filter.
- type: str
- returned: success
- sample: filter
- ip_mask:
- description:
- - A string that contains the IP address range in CIDR notation for the rule.
- type: str
- returned: success
- sample: 40.54.7.3
- action:
- description:
- - The desired action for requests captured by this rule.
- type: str
- returned: success
- sample: Reject
- returned: success
- type: complex
-routing_endpoints:
- description:
- - Custom endpoints.
- contains:
- event_hubs:
- description:
- - List of custom endpoints of event hubs.
- type: complex
- returned: success
- contains:
- name:
- description:
- - Name of the custom endpoint.
- type: str
- returned: success
- sample: foo
- resource_group:
- description:
- - Resource group of the endpoint.
- type: str
- returned: success
- sample: bar
- subscription:
- description:
- - Subscription id of the endpoint.
- type: str
- returned: success
- sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
- connection_string:
- description:
- - Connection string of the custom endpoint.
- type: str
- returned: success
- sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo"
- service_bus_queues:
- description:
- - List of custom endpoints of service bus queue.
- type: complex
- returned: success
- contains:
- name:
- description:
- - Name of the custom endpoint.
- type: str
- returned: success
- sample: foo
- resource_group:
- description:
- - Resource group of the endpoint.
- type: str
- returned: success
- sample: bar
- subscription:
- description:
- - Subscription ID of the endpoint.
- type: str
- returned: success
- sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
- connection_string:
- description:
- - Connection string of the custom endpoint.
- type: str
- returned: success
- sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo"
- service_bus_topics:
- description:
- - List of custom endpoints of service bus topic.
- type: complex
- returned: success
- contains:
- name:
- description:
- - Name of the custom endpoint.
- type: str
- returned: success
- sample: foo
- resource_group:
- description:
- - Resource group of the endpoint.
- type: str
- returned: success
- sample: bar
- subscription:
- description:
- - Subscription ID of the endpoint.
- type: str
- returned: success
- sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
- connection_string:
- description:
- - Connection string of the custom endpoint.
- type: str
- returned: success
- sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo"
- storage_containers:
- description:
- - List of custom endpoints of storage.
- type: complex
- returned: success
- contains:
- name:
- description:
- - Name of the custom endpoint.
- type: str
- returned: success
- sample: foo
- resource_group:
- description:
- - Resource group of the endpoint.
- type: str
- returned: success
- sample: bar
- subscription:
- description:
- - Subscription ID of the endpoint.
- type: str
- returned: success
- sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
- connection_string:
- description:
- - Connection string of the custom endpoint.
- type: str
- returned: success
- sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo"
- returned: success
- type: complex
-routes:
- description:
- - Route device-to-cloud messages to service-facing endpoints.
- type: complex
- returned: success
- contains:
- name:
- description:
- - Name of the route.
- type: str
- returned: success
- sample: route1
- source:
- description:
- - The origin of the data stream to be acted upon.
- type: str
- returned: success
- sample: device_messages
- enabled:
- description:
- - Whether to enable the route.
- type: bool
- returned: success
- sample: true
- endpoint_name:
- description:
- - The name of the endpoint in C(routing_endpoints) where IoT Hub sends messages that match the query.
- type: str
- returned: success
- sample: foo
- condition:
- description:
- - "The query expression for the routing query that is run against the message application properties,
- system properties, message body, device twin tags, and device twin properties to determine if it is a match for the endpoint."
- - "For more information about constructing a query,
- see U(https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-routing-query-syntax)"
- type: str
- returned: success
- sample: "true"
-''' # NOQA
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
-from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
-import re
-
-try:
- from msrestazure.tools import parse_resource_id
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-ip_filter_spec = dict(
- name=dict(type='str', required=True),
- ip_mask=dict(type='str', required=True),
- action=dict(type='str', required=True, choices=['accept', 'reject'])
-)
-
-
-routing_endpoints_spec = dict(
- connection_string=dict(type='str', required=True),
- name=dict(type='str', required=True),
- resource_group=dict(type='str'),
- subscription=dict(type='str'),
- resource_type=dict(type='str', required=True, choices=['eventhub', 'queue', 'storage', 'topic']),
- container=dict(type='str'),
- encoding=dict(type='str')
-)
-
-
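-# Maps the module's 'resource_type' choice to the matching SDK model class and the attribute name on RoutingEndpoints.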
-routing_endpoints_resource_type_mapping = {
- 'eventhub': {'model': 'RoutingEventHubProperties', 'attribute': 'event_hubs'},
- 'queue': {'model': 'RoutingServiceBusQueueEndpointProperties', 'attribute': 'service_bus_queues'},
- 'topic': {'model': 'RoutingServiceBusTopicEndpointProperties', 'attribute': 'service_bus_topics'},
- 'storage': {'model': 'RoutingStorageContainerProperties', 'attribute': 'storage_containers'}
-}
-
-
-routes_spec = dict(
- name=dict(type='str', required=True),
- source=dict(type='str', required=True, choices=['device_messages', 'twin_change_events', 'device_lifecycle_events', 'device_job_lifecycle_events']),
- enabled=dict(type='bool', required=True),
- endpoint_name=dict(type='str', required=True),
- condition=dict(type='str')
-)
-
-
-event_endpoint_spec = dict(
- partition_count=dict(type='int'),
- retention_time_in_days=dict(type='int')
-)
-
-
-class AzureRMIoTHub(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- location=dict(type='str'),
- sku=dict(type='str', choices=['b1', 'b2', 'b3', 'f1', 's1', 's2', 's3']),
- unit=dict(type='int'),
- event_endpoint=dict(type='dict', options=event_endpoint_spec),
- enable_file_upload_notifications=dict(type='bool'),
- ip_filters=dict(type='list', elements='dict', options=ip_filter_spec),
- routing_endpoints=dict(type='list', elements='dict', options=routing_endpoints_spec),
- routes=dict(type='list', elements='dict', options=routes_spec)
- )
-
- self.results = dict(
- changed=False,
- id=None
- )
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.location = None
- self.sku = None
- self.unit = None
- self.event_endpoint = None
- self.tags = None
- self.enable_file_upload_notifications = None
- self.ip_filters = None
- self.routing_endpoints = None
- self.routes = None
-
- super(AzureRMIoTHub, self).__init__(self.module_arg_spec, supports_check_mode=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- changed = False
-
- if not self.location:
- # Set default location
- resource_group = self.get_resource_group(self.resource_group)
- self.location = resource_group.location
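- # Normalize the SKU name to the capitalized form (for example 'S1') before passing it to the SDK.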
- self.sku = str.capitalize(self.sku) if self.sku else None
- iothub = self.get_hub()
- if self.state == 'present':
- if not iothub:
- changed = True
- self.sku = self.sku or 'S1'
- self.unit = self.unit or 1
- self.event_endpoint = self.event_endpoint or {}
- self.event_endpoint['partition_count'] = self.event_endpoint.get('partition_count') or 2
- self.event_endpoint['retention_time_in_days'] = self.event_endpoint.get('retention_time_in_days') or 1
- event_hub_properties = dict()
- event_hub_properties['events'] = self.IoThub_models.EventHubProperties(**self.event_endpoint)
- iothub_property = self.IoThub_models.IotHubProperties(event_hub_endpoints=event_hub_properties)
- if self.enable_file_upload_notifications:
- iothub_property.enable_file_upload_notifications = self.enable_file_upload_notifications
- if self.ip_filters:
- iothub_property.ip_filter_rules = self.construct_ip_filters()
- routing_endpoints = None
- routes = None
- if self.routing_endpoints:
- routing_endpoints = self.construct_routing_endpoint(self.routing_endpoints)
- if self.routes:
- routes = [self.construct_route(x) for x in self.routes]
- if routes or routing_endpoints:
- routing_property = self.IoThub_models.RoutingProperties(endpoints=routing_endpoints,
- routes=routes)
- iothub_property.routing = routing_property
- iothub = self.IoThub_models.IotHubDescription(location=self.location,
- sku=self.IoThub_models.IotHubSkuInfo(name=self.sku, capacity=self.unit),
- properties=iothub_property,
- tags=self.tags)
- if not self.check_mode:
- iothub = self.create_or_update_hub(iothub)
- else:
- # compare sku
- original_sku = iothub.sku
- if self.sku and self.sku != original_sku.name:
- self.log('SKU changed')
- iothub.sku.name = self.sku
- changed = True
- if self.unit and self.unit != original_sku.capacity:
- self.log('Unit count changed')
- iothub.sku.capacity = self.unit
- changed = True
- # compare event hub property
- event_hub = iothub.properties.event_hub_endpoints or dict()
- if self.event_endpoint:
- item = self.event_endpoint
- original_item = event_hub.get('events')
- if not original_item:
- changed = True
- event_hub['events'] = self.IoThub_models.EventHubProperties(partition_count=item.get('partition_count') or 2,
- retention_time_in_days=item.get('retention_time_in_days') or 1)
- elif item.get('partition_count') and original_item.partition_count != item['partition_count']:
- changed = True
- original_item.partition_count = item['partition_count']
- elif item.get('retention_time_in_days') and original_item.retention_time_in_days != item['retention_time_in_days']:
- changed = True
- original_item.retention_time_in_days = item['retention_time_in_days']
- # compare endpoint
- original_endpoints = iothub.properties.routing.endpoints
- endpoint_changed = False
- if self.routing_endpoints:
- # find the total length
- total_length = 0
- for item in routing_endpoints_resource_type_mapping.values():
- attribute = item['attribute']
- array = getattr(original_endpoints, attribute)
- total_length += len(array or [])
- if total_length != len(self.routing_endpoints):
- endpoint_changed = True
- else: # Lengths match, so compare each requested endpoint against the existing ones
- for item in self.routing_endpoints:
- if not self.lookup_endpoint(item, original_endpoints):
- endpoint_changed = True
- break
- if endpoint_changed:
- iothub.properties.routing.endpoints = self.construct_routing_endpoint(self.routing_endpoints)
- changed = True
- # compare routes
- original_routes = iothub.properties.routing.routes
- routes_changed = False
- if self.routes:
- if len(self.routes) != len(original_routes or []):
- routes_changed = True
- else:
- for item in self.routes:
- if not self.lookup_route(item, original_routes):
- routes_changed = True
- break
- if routes_changed:
- changed = True
- iothub.properties.routing.routes = [self.construct_route(x) for x in self.routes]
- # compare IP filter
- ip_filter_changed = False
- original_ip_filter = iothub.properties.ip_filter_rules
- if self.ip_filters:
- if len(self.ip_filters) != len(original_ip_filter or []):
- ip_filter_changed = True
- else:
- for item in self.ip_filters:
- if not self.lookup_ip_filter(item, original_ip_filter):
- ip_filter_changed = True
- break
- if ip_filter_changed:
- changed = True
- iothub.properties.ip_filter_rules = self.construct_ip_filters()
-
- # compare tags
- tag_changed, updated_tags = self.update_tags(iothub.tags)
- iothub.tags = updated_tags
- if changed and not self.check_mode:
- iothub = self.create_or_update_hub(iothub)
- # only tags changed
- if not changed and tag_changed:
- changed = True
- if not self.check_mode:
- iothub = self.update_instance_tags(updated_tags)
- self.results = self.to_dict(iothub)
- elif iothub:
- changed = True
- if not self.check_mode:
- self.delete_hub()
- self.results['changed'] = changed
- return self.results
-
- def lookup_ip_filter(self, target, ip_filters):
- if not ip_filters or len(ip_filters) == 0:
- return False
- for item in ip_filters:
- if item.filter_name == target['name']:
- if item.ip_mask != target['ip_mask']:
- return False
- if item.action.lower() != target['action']:
- return False
- return True
- return False
-
- def lookup_route(self, target, routes):
- if not routes or len(routes) == 0:
- return False
- for item in routes:
- if item.name == target['name']:
- if target['source'] != _camel_to_snake(item.source):
- return False
- if target['enabled'] != item.is_enabled:
- return False
- if target['endpoint_name'] != item.endpoint_names[0]:
- return False
- if target.get('condition') and target['condition'] != item.condition:
- return False
- return True
- return False
-
- def lookup_endpoint(self, target, routing_endpoints):
- resource_type = target['resource_type']
- attribute = routing_endpoints_resource_type_mapping[resource_type]['attribute']
- endpoints = getattr(routing_endpoints, attribute)
- if not endpoints or len(endpoints) == 0:
- return False
- for item in endpoints:
- if item.name == target['name']:
- if target.get('resource_group') and target['resource_group'] != (item.resource_group or self.resource_group):
- return False
- if target.get('subscription') and target['subscription'] != (item.subscription_id or self.subscription_id):
- return False
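- # The API masks the shared access key as '****' in the stored connection string, so turn the stored
- # value into a regex (also dropping any port) before comparing it with the requested connection string.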
- connection_string_regex = item.connection_string.replace('****', '.*')
- connection_string_regex = re.sub(r':\d+/;', '/;', connection_string_regex)
- if not re.search(connection_string_regex, target['connection_string']):
- return False
- if resource_type == 'storage':
- if target.get('container') and item.container_name != target['container']:
- return False
- if target.get('encoding') and item.encoding != target['encoding']:
- return False
- return True
- return False
-
- def construct_ip_filters(self):
- return [self.IoThub_models.IpFilterRule(filter_name=x['name'],
- action=self.IoThub_models.IpFilterActionType[x['action']],
- ip_mask=x['ip_mask']) for x in self.ip_filters]
-
- def construct_routing_endpoint(self, routing_endpoints):
- if not routing_endpoints or len(routing_endpoints) == 0:
- return None
- result = self.IoThub_models.RoutingEndpoints()
- for endpoint in routing_endpoints:
- resource_type_property = routing_endpoints_resource_type_mapping.get(endpoint['resource_type'])
- resource_type = getattr(self.IoThub_models, resource_type_property['model'])
- array = getattr(result, resource_type_property['attribute']) or []
- array.append(resource_type(**endpoint))
- setattr(result, resource_type_property['attribute'], array)
- return result
-
- def construct_route(self, route):
- if not route:
- return None
- return self.IoThub_models.RouteProperties(name=route['name'],
- source=_snake_to_camel(snake=route['source'], capitalize_first=True),
- is_enabled=route['enabled'],
- endpoint_names=[route['endpoint_name']],
- condition=route.get('condition'))
-
- def get_hub(self):
- try:
- return self.IoThub_client.iot_hub_resource.get(self.resource_group, self.name)
- except Exception:
- return None
-
- def create_or_update_hub(self, hub):
- try:
- poller = self.IoThub_client.iot_hub_resource.create_or_update(self.resource_group, self.name, hub, if_match=hub.etag)
- return self.get_poller_result(poller)
- except Exception as exc:
- self.fail('Error creating or updating IoT Hub {0}: {1}'.format(self.name, exc.message or str(exc)))
-
- def update_instance_tags(self, tags):
- try:
- poller = self.IoThub_client.iot_hub_resource.update(self.resource_group, self.name, tags=tags)
- return self.get_poller_result(poller)
- except Exception as exc:
- self.fail('Error updating IoT Hub {0}\'s tag: {1}'.format(self.name, exc.message or str(exc)))
-
- def delete_hub(self):
- try:
- self.IoThub_client.iot_hub_resource.delete(self.resource_group, self.name)
- return True
- except Exception as exc:
- self.fail('Error deleting IoT Hub {0}: {1}'.format(self.name, exc.message or str(exc)))
- return False
-
- def route_to_dict(self, route):
- return dict(
- name=route.name,
- source=_camel_to_snake(route.source),
- endpoint_name=route.endpoint_names[0],
- enabled=route.is_enabled,
- condition=route.condition
- )
-
- def instance_dict_to_dict(self, instance_dict):
- result = dict()
- if not instance_dict:
- return result
- for key in instance_dict.keys():
- result[key] = instance_dict[key].as_dict()
- return result
-
- def to_dict(self, hub):
- result = dict()
- properties = hub.properties
- result['id'] = hub.id
- result['name'] = hub.name
- result['resource_group'] = self.resource_group
- result['location'] = hub.location
- result['tags'] = hub.tags
- result['unit'] = hub.sku.capacity
- result['sku'] = hub.sku.name.lower()
- result['cloud_to_device'] = dict(
- max_delivery_count=properties.cloud_to_device.feedback.max_delivery_count,
- ttl_as_iso8601=str(properties.cloud_to_device.feedback.ttl_as_iso8601)
- ) if properties.cloud_to_device else dict()
- result['enable_file_upload_notifications'] = properties.enable_file_upload_notifications
- result['event_endpoint'] = properties.event_hub_endpoints.get('events').as_dict() if properties.event_hub_endpoints.get('events') else None
- result['host_name'] = properties.host_name
- result['ip_filters'] = [x.as_dict() for x in properties.ip_filter_rules]
- if properties.routing:
- result['routing_endpoints'] = properties.routing.endpoints.as_dict()
- result['routes'] = [self.route_to_dict(x) for x in properties.routing.routes]
- result['fallback_route'] = self.route_to_dict(properties.routing.fallback_route)
- result['status'] = properties.state
- result['storage_endpoints'] = self.instance_dict_to_dict(properties.storage_endpoints)
- return result
-
-
-def main():
- AzureRMIoTHub()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_iothub_info.py b/lib/ansible/modules/cloud/azure/azure_rm_iothub_info.py
deleted file mode 100644
index 150522e423..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_iothub_info.py
+++ /dev/null
@@ -1,618 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_iothub_info
-
-version_added: "2.9"
-
-short_description: Get IoT Hub facts
-
-description:
- - Get facts for a specific IoT Hub or all IoT Hubs.
-
-options:
- name:
- description:
- - Limit results to a specific IoT Hub.
- type: str
- resource_group:
- description:
- - The resource group to search for the desired IoT Hub.
- type: str
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- type: list
- show_stats:
- description:
- - Show the statistics for IoT Hub.
- - Note this will have network overhead for each IoT Hub.
- type: bool
- show_quota_metrics:
- description:
- - Get the quota metrics for an IoT hub.
- - Note this will have network overhead for each IoT Hub.
- type: bool
- show_endpoint_health:
- description:
- - Get the health for routing endpoints.
- - Note this will have network overhead for each IoT Hub.
- type: bool
- test_route_message:
- description:
- - Message to use when testing routes. It will be run against all routes.
- type: str
- list_consumer_groups:
- description:
- - List the consumer groups of the built-in event hub.
- type: bool
- list_keys:
- description:
- - List the keys of IoT Hub.
- - Note this will have network overhead for each IoT Hub.
- type: bool
-extends_documentation_fragment:
- - azure
-
-author:
- - Yuwei Zhou (@yuwzho)
-'''
-
-EXAMPLES = '''
- - name: Get facts for one IoT Hub
- azure_rm_iothub_info:
- name: Testing
- resource_group: myResourceGroup
-
- - name: Get facts for all IoT Hubs
- azure_rm_iothub_info:
-
- - name: Get facts for all IoT Hubs in a specific resource group
- azure_rm_iothub_info:
- resource_group: myResourceGroup
-
- - name: Get facts by tags
- azure_rm_iothub_info:
- tags:
- - testing
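-
- # Illustrative example: the extra flags below issue additional API calls for each returned hub.
- - name: Get facts for one IoT Hub including statistics, keys and consumer groups
- azure_rm_iothub_info:
- name: Testing
- resource_group: myResourceGroup
- show_stats: yes
- list_keys: yes
- list_consumer_groups: yes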
-'''
-
-RETURN = '''
-azure_iothubs:
- description:
- - List of IoT Hub dicts.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID of the IoT hub.
- type: str
- returned: always
- sample: "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/myResourceGroup/providers/Microsoft.Devices/IotHubs/Testing"
- name:
- description:
- - Name of the IoT hub.
- type: str
- returned: always
- sample: Testing
- resource_group:
- description:
- - Resource group of the IoT hub.
- type: str
- returned: always
- sample: myResourceGroup
- location:
- description:
- - Location of the IoT hub.
- type: str
- returned: always
- sample: eastus
- unit:
- description:
- - Units in the IoT Hub.
- type: int
- returned: always
- sample: 1
- sku:
- description:
- - Pricing tier for Azure IoT Hub.
- type: str
- returned: always
- sample: f1
- cloud_to_device:
- description:
- - Cloud to device message properties.
- type: complex
- returned: always
- contains:
- max_delivery_count:
- description:
- - The number of times the IoT hub attempts to deliver a message on the feedback queue.
- - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages)."
- type: int
- returned: always
- sample: 10
- ttl_as_iso8601:
- description:
- - The period of time for which a message is available to consume before it is expired by the IoT hub.
- - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages)."
- type: str
- returned: always
- sample: "1:00:00"
- enable_file_upload_notifications:
- description:
- - Whether file upload notifications are enabled.
- type: bool
- returned: always
- sample: True
- event_endpoints:
- description:
- - The built-in endpoint where device messages are delivered.
- type: complex
- returned: always
- contains:
- endpoint:
- description:
- - The Event Hub-compatible endpoint.
- type: str
- returned: always
- sample: "sb://iothub-ns-testing-1478811-9bbc4a15f0.servicebus.windows.net/"
- partition_count:
- description:
- - The number of partitions for receiving device-to-cloud messages in the Event Hub-compatible endpoint.
- - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)."
- type: int
- returned: always
- sample: 2
- retention_time_in_days:
- description:
- - The retention time for device-to-cloud messages in days.
- - "See U(https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#device-to-cloud-messages)."
- type: int
- returned: always
- sample: 1
- partition_ids:
- description:
- - List of the partition id for the event endpoint.
- type: list
- returned: always
- sample: ["0", "1"]
- host_name:
- description:
- - Host of the IoT hub.
- type: str
- returned: always
- sample: "testing.azure-devices.net"
- ip_filters:
- description:
- - Configure rules for rejecting or accepting traffic from specific IPv4 addresses.
- type: complex
- returned: always
- contains:
- name:
- description:
- - Name of the filter.
- type: str
- returned: always
- sample: filter
- ip_mask:
- description:
- - A string that contains the IP address range in CIDR notation for the rule.
- type: str
- returned: always
- sample: 40.54.7.3
- action:
- description:
- - The desired action for requests captured by this rule.
- type: str
- returned: always
- sample: Reject
- routing_endpoints:
- description:
- - Custom endpoints.
- type: complex
- returned: always
- contains:
- event_hubs:
- description:
- - List of custom endpoints of event hubs.
- type: complex
- returned: always
- contains:
- name:
- description:
- - Name of the custom endpoint.
- type: str
- returned: always
- sample: foo
- resource_group:
- description:
- - Resource group of the endpoint.
- type: str
- returned: always
- sample: bar
- subscription:
- description:
- - Subscription ID of the endpoint.
- type: str
- returned: always
- sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
- connection_string:
- description:
- - Connection string of the custom endpoint.
- type: str
- returned: always
- sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo"
- service_bus_queues:
- description:
- - List of custom endpoints of service bus queue.
- type: complex
- returned: always
- contains:
- name:
- description:
- - Name of the custom endpoint.
- type: str
- returned: always
- sample: foo
- resource_group:
- description:
- - Resource group of the endpoint.
- type: str
- returned: always
- sample: bar
- subscription:
- description:
- - Subscription ID of the endpoint.
- type: str
- returned: always
- sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
- connection_string:
- description:
- - Connection string of the custom endpoint.
- type: str
- returned: always
- sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo"
- service_bus_topics:
- description:
- - List of custom endpoints of service bus topic.
- type: complex
- returned: always
- contains:
- name:
- description:
- - Name of the custom endpoint.
- type: str
- returned: always
- sample: foo
- resource_group:
- description:
- - Resource group of the endpoint.
- type: str
- returned: always
- sample: bar
- subscription:
- description:
- - Subscription ID of the endpoint.
- type: str
- returned: always
- sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
- connection_string:
- description:
- - Connection string of the custom endpoint.
- type: str
- returned: always
- sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo"
- storage_containers:
- description:
- - List of custom endpoints of storage.
- type: complex
- returned: always
- contains:
- name:
- description:
- - Name of the custom endpoint.
- type: str
- returned: always
- sample: foo
- resource_group:
- description:
- - Resource group of the endpoint.
- type: str
- returned: always
- sample: bar
- subscription:
- description:
- - Subscription ID of the endpoint.
- type: str
- returned: always
- sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
- connection_string:
- description:
- - Connection string of the custom endpoint.
- type: str
- returned: always
- sample: "Endpoint=sb://quux.servicebus.windows.net:5671/;SharedAccessKeyName=qux;SharedAccessKey=****;EntityPath=foo"
- routes:
- description:
- - Route device-to-cloud messages to service-facing endpoints.
- type: complex
- returned: always
- contains:
- name:
- description:
- - Name of the route.
- type: str
- returned: always
- sample: route1
- source:
- description:
- - The origin of the data stream to be acted upon.
- type: str
- returned: always
- sample: device_messages
- enabled:
- description:
- - Whether to enable the route.
- type: bool
- returned: always
- sample: true
- endpoint_name:
- description:
- - The name of the endpoint in I(routing_endpoints) where IoT Hub sends messages that match the query.
- type: str
- returned: always
- sample: foo
- condition:
- description:
- - "The query expression for the routing query that is run against the message application properties,
- system properties, message body, device twin tags, and device twin properties to determine if it is a match for the endpoint."
- - "For more information about constructing a query,
- see U(https://docs.microsoft.com/en-us/azure/iot-hub/iot-hub-devguide-routing-query-syntax)"
- type: str
- returned: always
- sample: "true"
- tags:
- description:
- - Tags assigned to the IoT hub.
- type: dict
- returned: always
- sample: { 'key1': 'value1' }
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _camel_to_snake
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.tools import parse_resource_id
- from azure.common import AzureHttpError
-except Exception:
- # handled in azure_rm_common
- pass
-
-
-class AzureRMIoTHubFacts(AzureRMModuleBase):
- """Utility class to get IoT Hub facts"""
-
- def __init__(self):
-
- self.module_args = dict(
- name=dict(type='str'),
- resource_group=dict(type='str'),
- tags=dict(type='list'),
- show_stats=dict(type='bool'),
- show_quota_metrics=dict(type='bool'),
- show_endpoint_health=dict(type='bool'),
- list_keys=dict(type='bool'),
- test_route_message=dict(type='str'),
- list_consumer_groups=dict(type='bool')
- )
-
- self.results = dict(
- changed=False,
- azure_iothubs=[]
- )
-
- self.name = None
- self.resource_group = None
- self.tags = None
- self.show_stats = None
- self.show_quota_metrics = None
- self.show_endpoint_health = None
- self.list_keys = None
- self.test_route_message = None
- self.list_consumer_groups = None
-
- super(AzureRMIoTHubFacts, self).__init__(
- derived_arg_spec=self.module_args,
- supports_tags=False,
- facts_module=True
- )
-
- def exec_module(self, **kwargs):
-
- for key in self.module_args:
- setattr(self, key, kwargs[key])
-
- response = []
- if self.name:
- response = self.get_item()
- elif self.resource_group:
- response = self.list_by_resource_group()
- else:
- response = self.list_all()
- self.results['azure_iothubs'] = [self.to_dict(x) for x in response if self.has_tags(x.tags, self.tags)]
- return self.results
-
- def get_item(self):
- """Get a single IoT Hub"""
-
- self.log('Get properties for {0}'.format(self.name))
-
- item = None
-
- try:
- item = self.IoThub_client.iot_hub_resource.get(self.resource_group, self.name)
- return [item]
- except Exception as exc:
- self.fail('Error when getting IoT Hub {0}: {1}'.format(self.name, exc.message or str(exc)))
-
- def list_all(self):
- """Get all IoT Hubs"""
-
- self.log('List all IoT Hubs')
-
- try:
- return self.IoThub_client.iot_hub_resource.list_by_subscription()
- except Exception as exc:
- self.fail('Failed to list all IoT Hubs - {0}'.format(str(exc)))
-
- def list_by_resource_group(self):
- try:
- return self.IoThub_client.iot_hub_resource.list(self.resource_group)
- except Exception as exc:
- self.fail('Failed to list IoT Hubs in resource group {0} - {1}'.format(self.resource_group, exc.message or str(exc)))
-
- def show_hub_stats(self, resource_group, name):
- try:
- return self.IoThub_client.iot_hub_resource.get_stats(resource_group, name).as_dict()
- except Exception as exc:
- self.fail('Failed to get statistics for IoT Hub {0}/{1}: {2}'.format(resource_group, name, str(exc)))
-
- def show_hub_quota_metrics(self, resource_group, name):
- result = []
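- # The SDK returns a paged iterator; drain it one item at a time until StopIteration.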
- try:
- resp = self.IoThub_client.iot_hub_resource.get_quota_metrics(resource_group, name)
- while True:
- result.append(resp.next().as_dict())
- except StopIteration:
- pass
- except Exception as exc:
- self.fail('Failed to get quota metrics for IoT Hub {0}/{1}: {2}'.format(resource_group, name, str(exc)))
- return result
-
- def show_hub_endpoint_health(self, resource_group, name):
- result = []
- try:
- resp = self.IoThub_client.iot_hub_resource.get_endpoint_health(resource_group, name)
- while True:
- result.append(resp.next().as_dict())
- except StopIteration:
- pass
- except Exception as exc:
- self.fail('Failed to get routing endpoint health for IoT Hub {0}/{1}: {2}'.format(resource_group, name, str(exc)))
- return result
-
- def test_all_routes(self, resource_group, name):
- try:
- return self.IoThub_client.iot_hub_resource.test_all_routes(self.test_route_message, resource_group, name).routes.as_dict()
- except Exception as exc:
- self.fail('Failed to test routes for IoT Hub {0}/{1}: {2}'.format(resource_group, name, str(exc)))
-
- def list_hub_keys(self, resource_group, name):
- result = []
- try:
- resp = self.IoThub_client.iot_hub_resource.list_keys(resource_group, name)
- while True:
- result.append(resp.next().as_dict())
- except StopIteration:
- pass
- except Exception as exc:
- self.fail('Failed to list keys for IoT Hub {0}/{1}: {2}'.format(resource_group, name, str(exc)))
- return result
-
- def list_event_hub_consumer_groups(self, resource_group, name, event_hub_endpoint='events'):
- result = []
- try:
- resp = self.IoThub_client.iot_hub_resource.list_event_hub_consumer_groups(resource_group, name, event_hub_endpoint)
- while True:
- cg = resp.next()
- result.append(dict(
- id=cg.id,
- name=cg.name
- ))
- except StopIteration:
- pass
- except Exception as exc:
- self.fail('Failed to list consumer groups for IoT Hub {0}/{1}: {2}'.format(resource_group, name, str(exc)))
- return result
-
- def route_to_dict(self, route):
- return dict(
- name=route.name,
- source=_camel_to_snake(route.source),
- endpoint_name=route.endpoint_names[0],
- enabled=route.is_enabled,
- condition=route.condition
- )
-
- def instance_dict_to_dict(self, instance_dict):
- result = dict()
- for key in instance_dict.keys():
- result[key] = instance_dict[key].as_dict()
- return result
-
- def to_dict(self, hub):
- result = dict()
- properties = hub.properties
- result['id'] = hub.id
- result['name'] = hub.name
- result['resource_group'] = parse_resource_id(hub.id).get('resource_group')
- result['location'] = hub.location
- result['tags'] = hub.tags
- result['unit'] = hub.sku.capacity
- result['sku'] = hub.sku.name.lower()
- result['cloud_to_device'] = dict(
- max_delivery_count=properties.cloud_to_device.feedback.max_delivery_count,
- ttl_as_iso8601=str(properties.cloud_to_device.feedback.ttl_as_iso8601)
- )
- result['enable_file_upload_notifications'] = properties.enable_file_upload_notifications
- result['event_hub_endpoints'] = self.instance_dict_to_dict(properties.event_hub_endpoints)
- result['host_name'] = properties.host_name
- result['ip_filters'] = [x.as_dict() for x in properties.ip_filter_rules]
- result['routing_endpoints'] = properties.routing.endpoints.as_dict()
- result['routes'] = [self.route_to_dict(x) for x in properties.routing.routes]
- result['fallback_route'] = self.route_to_dict(properties.routing.fallback_route)
- result['status'] = properties.state
- result['storage_endpoints'] = self.instance_dict_to_dict(properties.storage_endpoints)
-
- # network overhead part
- if self.show_stats:
- result['statistics'] = self.show_hub_stats(result['resource_group'], hub.name)
- if self.show_quota_metrics:
- result['quota_metrics'] = self.show_hub_quota_metrics(result['resource_group'], hub.name)
- if self.show_endpoint_health:
- result['endpoint_health'] = self.show_hub_endpoint_health(result['resource_group'], hub.name)
- if self.list_keys:
- result['keys'] = self.list_hub_keys(result['resource_group'], hub.name)
- if self.test_route_message:
- result['test_route_result'] = self.test_all_routes(result['resource_group'], hub.name)
- if self.list_consumer_groups:
- result['consumer_groups'] = self.list_event_hub_consumer_groups(result['resource_group'], hub.name)
- return result
-
-
-def main():
- """Main module execution code path"""
-
- AzureRMIoTHubFacts()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_iothubconsumergroup.py b/lib/ansible/modules/cloud/azure/azure_rm_iothubconsumergroup.py
deleted file mode 100644
index 75b5ad77c9..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_iothubconsumergroup.py
+++ /dev/null
@@ -1,169 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_iothubconsumergroup
-version_added: "2.9"
-short_description: Manage Azure IoT hub consumer group
-description:
- - Create or delete a consumer group for the built-in event hub of an Azure IoT hub.
-options:
- resource_group:
- description:
- - Name of resource group.
- type: str
- required: true
- hub:
- description:
- - Name of the IoT hub.
- type: str
- required: true
- state:
- description:
- - State of the consumer group. Use C(present) to create a consumer group and C(absent) to delete it.
- type: str
- default: present
- choices:
- - absent
- - present
- event_hub:
- description:
- - Event hub endpoint name.
- type: str
- default: events
- name:
- description:
- - Name of the consumer group.
- type: str
- required: true
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
-- name: Create an IoT hub consumer group
- azure_rm_iothubconsumergroup:
- name: test
- resource_group: myResourceGroup
- hub: Testing
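-
-# Illustrative example: removes the consumer group created above.
-- name: Delete an IoT hub consumer group
- azure_rm_iothubconsumergroup:
- name: test
- resource_group: myResourceGroup
- hub: Testing
- state: absent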
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID of the consumer group.
- returned: success
- type: str
- sample: "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/myResourceGroup
- /providers/Microsoft.Devices/IotHubs/Testing/events/ConsumerGroups/%24Default"
-name:
- description:
- - Name of the consumer group.
- sample: test
- returned: success
- type: str
-''' # NOQA
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
-from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
-import re
-
-try:
- from msrestazure.tools import parse_resource_id
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMIoTHubConsumerGroup(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- hub=dict(type='str', required=True),
- event_hub=dict(type='str', default='events')
- )
-
- self.results = dict(
- changed=False,
- id=None
- )
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.hub = None
- self.event_hub = None
-
- super(AzureRMIoTHubConsumerGroup, self).__init__(self.module_arg_spec, supports_check_mode=True)
-
- def exec_module(self, **kwargs):
-
- for key in self.module_arg_spec.keys():
- setattr(self, key, kwargs[key])
-
- changed = False
- cg = self.get_cg()
- if not cg and self.state == 'present':
- changed = True
- if not self.check_mode:
- cg = self.create_cg()
- elif cg and self.state == 'absent':
- changed = True
- cg = None
- if not self.check_mode:
- self.delete_cg()
- self.results = dict(
- id=cg.id,
- name=cg.name
- ) if cg else dict()
- self.results['changed'] = changed
- return self.results
-
- def get_cg(self):
- try:
- return self.IoThub_client.iot_hub_resource.get_event_hub_consumer_group(self.resource_group, self.hub, self.event_hub, self.name)
- except Exception:
- pass
- return None
-
- def create_cg(self):
- try:
- return self.IoThub_client.iot_hub_resource.create_event_hub_consumer_group(self.resource_group, self.hub, self.event_hub, self.name)
- except Exception as exc:
- self.fail('Error when creating the consumer group {0} for IoT Hub {1} event hub {2}: {3}'.format(self.name, self.hub, self.event_hub, str(exc)))
-
- def delete_cg(self):
- try:
- return self.IoThub_client.iot_hub_resource.delete_event_hub_consumer_group(self.resource_group, self.hub, self.event_hub, self.name)
- except Exception as exc:
- self.fail('Error when deleting the consumer group {0} for IoT Hub {1} event hub {2}: {3}'.format(self.name, self.hub, self.event_hub, str(exc)))
-
-
-def main():
- AzureRMIoTHubConsumerGroup()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_keyvault.py b/lib/ansible/modules/cloud/azure/azure_rm_keyvault.py
deleted file mode 100644
index 70dcfedd8e..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_keyvault.py
+++ /dev/null
@@ -1,504 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_keyvault
-version_added: "2.5"
-short_description: Manage Key Vault instance
-description:
- - Create, update or delete an instance of Key Vault.
-
-options:
- resource_group:
- description:
- - The name of the Resource Group to which the server belongs.
- required: True
- vault_name:
- description:
- - Name of the vault.
- required: True
- location:
- description:
- - Resource location. If not set, location from the resource group will be used as default.
- vault_tenant:
- description:
- - The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault.
- sku:
- description:
- - SKU details.
- suboptions:
- family:
- description:
- - SKU family name.
- name:
- description:
- - SKU name to specify whether the key vault is a standard vault or a premium vault.
- required: True
- choices:
- - 'standard'
- - 'premium'
- access_policies:
- description:
- - An array of 0 to 16 identities that have access to the key vault.
- - All identities in the array must use the same tenant ID as the key vault's tenant ID.
- suboptions:
- tenant_id:
- description:
- - The Azure Active Directory tenant ID that should be used for authenticating requests to the key vault.
- - Current keyvault C(tenant_id) value will be used if not specified.
- object_id:
- description:
- - The object ID of a user, service principal or security group in the Azure Active Directory tenant for the vault.
- - The object ID must be unique for the list of access policies.
- - Please note this is not the application ID. The object ID can be obtained by running C(az ad sp show --id <application id>).
- required: True
- application_id:
- description:
- - Application ID of the client making request on behalf of a principal.
- keys:
- description:
- - List of permissions to keys.
- choices:
- - 'encrypt'
- - 'decrypt'
- - 'wrapkey'
- - 'unwrapkey'
- - 'sign'
- - 'verify'
- - 'get'
- - 'list'
- - 'create'
- - 'update'
- - 'import'
- - 'delete'
- - 'backup'
- - 'restore'
- - 'recover'
- - 'purge'
- secrets:
- description:
- - List of permissions to secrets.
- choices:
- - 'get'
- - 'list'
- - 'set'
- - 'delete'
- - 'backup'
- - 'restore'
- - 'recover'
- - 'purge'
- certificates:
- description:
- - List of permissions to certificates.
- choices:
- - 'get'
- - 'list'
- - 'delete'
- - 'create'
- - 'import'
- - 'update'
- - 'managecontacts'
- - 'getissuers'
- - 'listissuers'
- - 'setissuers'
- - 'deleteissuers'
- - 'manageissuers'
- - 'recover'
- - 'purge'
- storage:
- description:
- - List of permissions to storage accounts.
- enabled_for_deployment:
- description:
- - Property to specify whether Azure Virtual Machines are permitted to retrieve certificates stored as secrets from the key vault.
- type: bool
- enabled_for_disk_encryption:
- description:
- - Property to specify whether Azure Disk Encryption is permitted to retrieve secrets from the vault and unwrap keys.
- type: bool
- enabled_for_template_deployment:
- description:
- - Property to specify whether Azure Resource Manager is permitted to retrieve secrets from the key vault.
- type: bool
- enable_soft_delete:
- description:
- - Property to specify whether the soft delete functionality is enabled for this key vault.
- type: bool
- recover_mode:
- description:
- - Create vault in recovery mode.
- type: bool
- state:
- description:
- - Assert the state of the KeyVault. Use C(present) to create or update an KeyVault and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Create instance of Key Vault
- azure_rm_keyvault:
- resource_group: myResourceGroup
- vault_name: samplekeyvault
- enabled_for_deployment: yes
- vault_tenant: 72f98888-8666-4144-9199-2d7cd0111111
- sku:
- name: standard
- access_policies:
- - tenant_id: 72f98888-8666-4144-9199-2d7cd0111111
- object_id: 99998888-8666-4144-9199-2d7cd0111111
- keys:
- - get
- - list
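-
- # Illustrative example: removes the vault created above.
- - name: Delete instance of Key Vault
- azure_rm_keyvault:
- resource_group: myResourceGroup
- vault_name: samplekeyvault
- state: absent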
-'''
-
-RETURN = '''
-id:
- description:
- - The Azure Resource Manager resource ID for the key vault.
- returned: always
- type: str
- sample: id
-'''
-
-import collections
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.keyvault import KeyVaultManagementClient
- from msrest.polling import LROPoller
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMVaults(AzureRMModuleBase):
- """Configuration class for an Azure RM Key Vault resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- vault_name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- vault_tenant=dict(
- type='str'
- ),
- sku=dict(
- type='dict'
- ),
- access_policies=dict(
- type='list',
- elements='dict',
- options=dict(
- tenant_id=dict(type='str'),
- object_id=dict(type='str', required=True),
- application_id=dict(type='str'),
- # FUTURE: add `choices` support once choices supports lists of values
- keys=dict(type='list'),
- secrets=dict(type='list'),
- certificates=dict(type='list'),
- storage=dict(type='list')
- )
- ),
- enabled_for_deployment=dict(
- type='bool'
- ),
- enabled_for_disk_encryption=dict(
- type='bool'
- ),
- enabled_for_template_deployment=dict(
- type='bool'
- ),
- enable_soft_delete=dict(
- type='bool'
- ),
- recover_mode=dict(
- type='bool'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.module_required_if = [['state', 'present', ['vault_tenant']]]
-
- self.resource_group = None
- self.vault_name = None
- self.parameters = dict()
- self.tags = None
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMVaults, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True,
- required_if=self.module_required_if)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- # translate Ansible input to SDK-formatted dict in self.parameters
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- if key == "location":
- self.parameters["location"] = kwargs[key]
- elif key == "vault_tenant":
- self.parameters.setdefault("properties", {})["tenant_id"] = kwargs[key]
- elif key == "sku":
- self.parameters.setdefault("properties", {})["sku"] = kwargs[key]
- elif key == "access_policies":
- access_policies = kwargs[key]
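- # Fold the flat 'keys'/'secrets'/'certificates'/'storage' lists into the nested 'permissions' dict expected by the SDK.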
- for policy in access_policies:
- if 'keys' in policy:
- policy.setdefault("permissions", {})["keys"] = policy["keys"]
- policy.pop("keys", None)
- if 'secrets' in policy:
- policy.setdefault("permissions", {})["secrets"] = policy["secrets"]
- policy.pop("secrets", None)
- if 'certificates' in policy:
- policy.setdefault("permissions", {})["certificates"] = policy["certificates"]
- policy.pop("certificates", None)
- if 'storage' in policy:
- policy.setdefault("permissions", {})["storage"] = policy["storage"]
- policy.pop("storage", None)
- if policy.get('tenant_id') is None:
- # default to key vault's tenant, since that's all that's currently supported anyway
- policy['tenant_id'] = kwargs['vault_tenant']
- self.parameters.setdefault("properties", {})["access_policies"] = access_policies
- elif key == "enabled_for_deployment":
- self.parameters.setdefault("properties", {})["enabled_for_deployment"] = kwargs[key]
- elif key == "enabled_for_disk_encryption":
- self.parameters.setdefault("properties", {})["enabled_for_disk_encryption"] = kwargs[key]
- elif key == "enabled_for_template_deployment":
- self.parameters.setdefault("properties", {})["enabled_for_template_deployment"] = kwargs[key]
- elif key == "enable_soft_delete":
- self.parameters.setdefault("properties", {})["enable_soft_delete"] = kwargs[key]
- elif key == "recover_mode":
- self.parameters.setdefault("properties", {})["create_mode"] = 'recover' if kwargs[key] else 'default'
-
- old_response = None
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(KeyVaultManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version="2018-02-14")
-
- resource_group = self.get_resource_group(self.resource_group)
-
- if "location" not in self.parameters:
- self.parameters["location"] = resource_group.location
-
- old_response = self.get_keyvault()
-
- if not old_response:
- self.log("Key Vault instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("Key Vault instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.log("Need to check if Key Vault instance has to be deleted or may be updated")
- if ('location' in self.parameters) and (self.parameters['location'] != old_response['location']):
- self.to_do = Actions.Update
- elif ('tenant_id' in self.parameters) and (self.parameters['tenant_id'] != old_response['tenant_id']):
- self.to_do = Actions.Update
- elif ('enabled_for_deployment' in self.parameters) and (self.parameters['enabled_for_deployment'] != old_response['enabled_for_deployment']):
- self.to_do = Actions.Update
- elif (('enabled_for_disk_encryption' in self.parameters) and
- (self.parameters['enabled_for_disk_encryption'] != old_response['enabled_for_disk_encryption'])):
- self.to_do = Actions.Update
- elif (('enabled_for_template_deployment' in self.parameters) and
- (self.parameters['enabled_for_template_deployment'] != old_response['enabled_for_template_deployment'])):
- self.to_do = Actions.Update
- elif ('enable_soft_delete' in self.parameters) and (self.parameters['enable_soft_delete'] != old_response['enable_soft_delete']):
- self.to_do = Actions.Update
- elif ('create_mode' in self.parameters) and (self.parameters['create_mode'] != old_response['create_mode']):
- self.to_do = Actions.Update
- elif 'access_policies' in self.parameters['properties']:
- if len(self.parameters['properties']['access_policies']) != len(old_response['properties']['access_policies']):
- self.to_do = Actions.Update
- else:
- # FUTURE: this list isn't really order-dependent- we should be set-ifying the rules list for order-independent comparison
- for i in range(len(old_response['properties']['access_policies'])):
- n = self.parameters['properties']['access_policies'][i]
- o = old_response['properties']['access_policies'][i]
- if n.get('tenant_id', False) != o.get('tenant_id', False):
- self.to_do = Actions.Update
- break
- if n.get('object_id', None) != o.get('object_id', None):
- self.to_do = Actions.Update
- break
- if n.get('application_id', None) != o.get('application_id', None):
- self.to_do = Actions.Update
- break
- if sorted(n.get('keys', [])) != sorted(o.get('keys', [])):
- self.to_do = Actions.Update
- break
- if sorted(n.get('secrets', [])) != sorted(o.get('secrets', [])):
- self.to_do = Actions.Update
- break
- if sorted(n.get('certificates', [])) != sorted(o.get('certificates', [])):
- self.to_do = Actions.Update
- break
- if sorted(n.get('storage', [])) != sorted(o.get('storage', [])):
- self.to_do = Actions.Update
- break
-
- update_tags, newtags = self.update_tags(old_response.get('tags', dict()))
-
- if update_tags:
- self.to_do = Actions.Update
- self.tags = newtags
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the Key Vault instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- self.parameters["tags"] = self.tags
-
- response = self.create_update_keyvault()
-
- if not old_response:
- self.results['changed'] = True
- else:
-                self.results['changed'] = (old_response != response)
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("Key Vault instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_keyvault()
- # make sure instance is actually deleted, for some Azure resources, instance is hanging around
- # for some time after deletion -- this should be really fixed in Azure
- while self.get_keyvault():
- time.sleep(20)
- else:
- self.log("Key Vault instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
-
- return self.results
-
- def create_update_keyvault(self):
- '''
- Creates or updates Key Vault with the specified configuration.
-
- :return: deserialized Key Vault instance state dictionary
- '''
- self.log("Creating / Updating the Key Vault instance {0}".format(self.vault_name))
-
- try:
- response = self.mgmt_client.vaults.create_or_update(resource_group_name=self.resource_group,
- vault_name=self.vault_name,
- parameters=self.parameters)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the Key Vault instance.')
- self.fail("Error creating the Key Vault instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_keyvault(self):
- '''
- Deletes specified Key Vault instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the Key Vault instance {0}".format(self.vault_name))
- try:
- response = self.mgmt_client.vaults.delete(resource_group_name=self.resource_group,
- vault_name=self.vault_name)
- except CloudError as e:
- self.log('Error attempting to delete the Key Vault instance.')
- self.fail("Error deleting the Key Vault instance: {0}".format(str(e)))
-
- return True
-
- def get_keyvault(self):
- '''
- Gets the properties of the specified Key Vault.
-
- :return: deserialized Key Vault instance state dictionary
- '''
- self.log("Checking if the Key Vault instance {0} is present".format(self.vault_name))
- found = False
- try:
- response = self.mgmt_client.vaults.get(resource_group_name=self.resource_group,
- vault_name=self.vault_name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("Key Vault instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the Key Vault instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMVaults()
-
-
-if __name__ == '__main__':
- main()
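
The access-policy comparison in exec_module above is order-dependent, as its FUTURE comment notes. Below is a minimal sketch of an order-independent comparison in plain Python; the dictionary fields mirror the access-policy dictionaries used above, while the helper names are illustrative and not part of the module.

def _normalize_policy(policy):
    # Reduce an access-policy dict to a hashable, order-independent form.
    return (
        policy.get('tenant_id'),
        policy.get('object_id'),
        policy.get('application_id'),
        tuple(sorted(policy.get('keys', []) or [])),
        tuple(sorted(policy.get('secrets', []) or [])),
        tuple(sorted(policy.get('certificates', []) or [])),
        tuple(sorted(policy.get('storage', []) or [])),
    )


def access_policies_differ(new_policies, old_policies):
    # Compare two access-policy lists, ignoring the ordering of policies and of their permission lists.
    return ({_normalize_policy(p) for p in new_policies} !=
            {_normalize_policy(p) for p in old_policies})

With a helper like this, the per-index loop above could collapse into a single access_policies_differ() check; whether duplicate policies inside one list should count as a difference is left open here.
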
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_keyvault_info.py b/lib/ansible/modules/cloud/azure/azure_rm_keyvault_info.py
deleted file mode 100644
index 156f4a498c..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_keyvault_info.py
+++ /dev/null
@@ -1,323 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yunge Zhu, <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_keyvault_info
-version_added: "2.9"
-short_description: Get Azure Key Vault facts
-description:
- - Get facts of Azure Key Vault.
-
-options:
- resource_group:
- description:
- - The name of the resource group to which the key vault belongs.
- name:
- description:
- - The name of the key vault.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yunge Zhu (@yungezz)
-
-'''
-
-EXAMPLES = '''
- - name: Get Key Vault by name
- azure_rm_keyvault_info:
- resource_group: myResourceGroup
- name: myVault
-
- - name: List Key Vaults in specific resource group
- azure_rm_keyvault_info:
- resource_group: myResourceGroup
-
- - name: List Key Vaults in current subscription
- azure_rm_keyvault_info:
-'''
-
-RETURN = '''
-keyvaults:
- description: List of Azure Key Vaults.
- returned: always
- type: list
- contains:
- name:
- description:
- - Name of the vault.
- returned: always
- type: str
- sample: myVault
- id:
- description:
- - Resource Id of the vault.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.KeyVault/vaults/myVault
- vault_uri:
- description:
- - Vault uri.
- returned: always
- type: str
- sample: https://myVault.vault.azure.net/
- location:
- description:
- - Location of the vault.
- returned: always
- type: str
- sample: eastus
-    enabled_for_deployment:
- description:
- - Whether Azure Virtual Machines are permitted to retrieve certificates stored as secrets from the key vault.
- returned: always
- type: bool
- sample: False
- enabled_for_disk_encryption:
- description:
- - Whether Azure Disk Encryption is permitted to retrieve secrets from the vault and unwrap keys.
- returned: always
- type: bool
- sample: False
- enabled_for_template_deployment:
- description:
- - Whether Azure Resource Manager is permitted to retrieve secrets from the key vault.
- returned: always
- type: bool
- sample: False
- tags:
- description:
- - List of tags.
- type: list
- sample:
- - foo
- sku:
- description:
- - Sku of the vault.
- returned: always
- type: dict
- contains:
- family:
- description: Sku family name.
- type: str
- returned: always
- sample: A
- name:
- description: Sku name.
- type: str
- returned: always
- sample: standard
- access_policies:
- description:
-            - Access policies of the vault.
- returned: always
- type: list
- contains:
- object_id:
-                description: The object ID of a user, service principal or security group in AAD for the vault.
- type: str
- returned: always
- sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- tenant_id:
-                description: The AAD tenant ID that should be used for authenticating requests to the key vault.
- type: str
- returned: always
- sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- permissions:
- description: Permissions the identity has for keys, secrets and certificates.
- type: complex
- returned: always
- contains:
- keys:
- description:
- Permissions to keys.
- type: list
- returned: always
- sample:
- - get
- - create
- secrets:
- description:
- Permissions to secrets.
- type: list
- returned: always
- sample:
- - list
- - set
- certificates:
- description:
-                            Permissions to certificates.
- type: list
- returned: always
- sample:
- - get
- - import
-'''
-
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from azure.mgmt.keyvault import KeyVaultManagementClient
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def keyvault_to_dict(vault):
- return dict(
- id=vault.id,
- name=vault.name,
- location=vault.location,
- tags=vault.tags,
- vault_uri=vault.properties.vault_uri,
- enabled_for_deployment=vault.properties.enabled_for_deployment,
- enabled_for_disk_encryption=vault.properties.enabled_for_disk_encryption,
- enabled_for_template_deployment=vault.properties.enabled_for_template_deployment,
- access_policies=[dict(
- tenant_id=policy.tenant_id,
- object_id=policy.object_id,
- permissions=dict(
- keys=[kp.lower() for kp in policy.permissions.keys] if policy.permissions.keys else None,
- secrets=[sp.lower() for sp in policy.permissions.secrets] if policy.permissions.secrets else None,
- certificates=[cp.lower() for cp in policy.permissions.certificates] if policy.permissions.certificates else None
- ) if policy.permissions else None,
- ) for policy in vault.properties.access_policies] if vault.properties.access_policies else None,
- sku=dict(
- family=vault.properties.sku.family,
- name=vault.properties.sku.name.name
- )
- )
-
-
-class AzureRMKeyVaultInfo(AzureRMModuleBase):
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(type='str'),
- name=dict(type='str'),
- tags=dict(type='list')
- )
-
- self.resource_group = None
- self.name = None
- self.tags = None
-
- self.results = dict(changed=False)
- self._client = None
-
- super(AzureRMKeyVaultInfo, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=False,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
-
- self._client = self.get_mgmt_svc_client(KeyVaultManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version="2018-02-14")
-
- if self.name:
- if self.resource_group:
- self.results['keyvaults'] = self.get_by_name()
- else:
- self.fail("resource_group is required when filtering by name")
- elif self.resource_group:
- self.results['keyvaults'] = self.list_by_resource_group()
- else:
- self.results['keyvaults'] = self.list()
-
- return self.results
-
- def get_by_name(self):
- '''
- Gets the properties of the specified key vault.
-
-        :return: deserialized key vault state dictionary
- '''
- self.log("Get the key vault {0}".format(self.name))
-
- results = []
- try:
- response = self._client.vaults.get(resource_group_name=self.resource_group,
- vault_name=self.name)
- self.log("Response : {0}".format(response))
-
- if response and self.has_tags(response.tags, self.tags):
- results.append(keyvault_to_dict(response))
- except CloudError as e:
- self.log("Did not find the key vault {0}: {1}".format(self.name, str(e)))
- return results
-
- def list_by_resource_group(self):
- '''
-        Lists the properties of key vaults in a specific resource group.
-
- :return: deserialized key vaults state dictionary
- '''
- self.log("Get the key vaults in resource group {0}".format(self.resource_group))
-
- results = []
- try:
- response = list(self._client.vaults.list_by_resource_group(resource_group_name=self.resource_group))
- self.log("Response : {0}".format(response))
-
- if response:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(keyvault_to_dict(item))
- except CloudError as e:
- self.log("Did not find key vaults in resource group {0} : {1}.".format(self.resource_group, str(e)))
- return results
-
- def list(self):
- '''
-        Lists the properties of key vaults in the current subscription.
-
- :return: deserialized key vaults state dictionary
- '''
- self.log("Get the key vaults in current subscription")
-
- results = []
- try:
- response = list(self._client.vaults.list())
- self.log("Response : {0}".format(response))
-
- if response:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(keyvault_to_dict(item))
- except CloudError as e:
-            self.log("Did not find key vaults in the current subscription: {0}".format(str(e)))
- return results
-
-
-def main():
- """Main execution"""
- AzureRMKeyVaultInfo()
-
-
-if __name__ == '__main__':
- main()
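
The I(tags) option above filters results using entries written as 'key' or 'key:value'. The module relies on the shared has_tags() helper for this; the following is a small, self-contained sketch of the same matching rule, with an illustrative function name.

def matches_tag_filters(resource_tags, filters):
    # True when resource_tags satisfies every 'key' or 'key:value' filter.
    resource_tags = resource_tags or {}
    for flt in filters or []:
        key, _, value = flt.partition(':')
        if key not in resource_tags:
            return False
        if value and resource_tags[key] != value:
            return False
    return True


# Keep only vaults tagged env=prod that also carry an 'owner' tag.
print(matches_tag_filters({'env': 'prod', 'owner': 'ops'}, ['env:prod', 'owner']))  # True
print(matches_tag_filters({'env': 'dev'}, ['env:prod']))                            # False
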
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_keyvaultkey.py b/lib/ansible/modules/cloud/azure/azure_rm_keyvaultkey.py
deleted file mode 100644
index 8a3b7722cc..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_keyvaultkey.py
+++ /dev/null
@@ -1,310 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_keyvaultkey
-version_added: 2.5
-short_description: Use Azure KeyVault keys
-description:
- - Create or delete a key within a given keyvault.
-    - By using Key Vault, you can encrypt keys and secrets, such as authentication keys, storage account keys, data encryption keys, .PFX files, and passwords.
-options:
- keyvault_uri:
- description:
- - URI of the keyvault endpoint.
- required: true
- key_name:
- description:
- - Name of the keyvault key.
- required: true
- byok_file:
- description:
- - BYOK file.
- pem_file:
- description:
- - PEM file.
- pem_password:
- description:
- - PEM password.
- state:
- description:
- - Assert the state of the key. Use C(present) to create a key and C(absent) to delete a key.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Ian Philpot (@iphilpot)
-
-'''
-
-EXAMPLES = '''
- - name: Create a key
- azure_rm_keyvaultkey:
- key_name: MyKey
- keyvault_uri: https://contoso.vault.azure.net/
-
- - name: Delete a key
- azure_rm_keyvaultkey:
- key_name: MyKey
- keyvault_uri: https://contoso.vault.azure.net/
- state: absent
-'''
-
-RETURN = '''
-state:
- description:
- - Current state of the key.
- returned: success
- type: complex
- contains:
- key_id:
- description:
-                - Key resource path.
- type: str
-            sample: https://contoso.vault.azure.net/keys/hello/e924f053839f4431b35bc54393f98423
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- import re
- import codecs
- from azure.keyvault import KeyVaultClient, KeyVaultId, KeyVaultAuthentication
- from azure.keyvault.models import KeyAttributes, JsonWebKey
- from azure.common.credentials import ServicePrincipalCredentials
- from azure.keyvault.models.key_vault_error import KeyVaultErrorException
- from msrestazure.azure_active_directory import MSIAuthentication
- from OpenSSL import crypto
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMKeyVaultKey(AzureRMModuleBase):
- ''' Module that creates or deletes keys in Azure KeyVault '''
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- key_name=dict(type='str', required=True),
- keyvault_uri=dict(type='str', required=True),
- pem_file=dict(type='str'),
- pem_password=dict(type='str'),
- byok_file=dict(type='str'),
- state=dict(type='str', default='present', choices=['present', 'absent'])
- )
-
- self.results = dict(
- changed=False,
- state=dict()
- )
-
- self.key_name = None
- self.keyvault_uri = None
- self.pem_file = None
- self.pem_password = None
- self.state = None
- self.client = None
- self.tags = None
-
- required_if = [
- ('pem_password', 'present', ['pem_file'])
- ]
-
- super(AzureRMKeyVaultKey, self).__init__(self.module_arg_spec,
- supports_check_mode=True,
- required_if=required_if,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- # Create KeyVaultClient
- self.client = self.get_keyvault_client()
-
- results = dict()
- changed = False
-
- try:
- results['key_id'] = self.get_key(self.key_name)
-
- # Key exists and will be deleted
- if self.state == 'absent':
- changed = True
-
- except KeyVaultErrorException:
- # Key doesn't exist
- if self.state == 'present':
- changed = True
-
- self.results['changed'] = changed
- self.results['state'] = results
-
- if not self.check_mode:
-
- # Create key
- if self.state == 'present' and changed:
- results['key_id'] = self.create_key(self.key_name, self.tags)
- self.results['state'] = results
- self.results['state']['status'] = 'Created'
- # Delete key
- elif self.state == 'absent' and changed:
- results['key_id'] = self.delete_key(self.key_name)
- self.results['state'] = results
- self.results['state']['status'] = 'Deleted'
- else:
- if self.state == 'present' and changed:
- self.results['state']['status'] = 'Created'
- elif self.state == 'absent' and changed:
- self.results['state']['status'] = 'Deleted'
-
- return self.results
-
- def get_keyvault_client(self):
- try:
- self.log("Get KeyVaultClient from MSI")
- credentials = MSIAuthentication(resource='https://vault.azure.net')
- return KeyVaultClient(credentials)
- except Exception:
- self.log("Get KeyVaultClient from service principal")
-
- # Create KeyVault Client using KeyVault auth class and auth_callback
- def auth_callback(server, resource, scope):
- if self.credentials['client_id'] is None or self.credentials['secret'] is None:
- self.fail('Please specify client_id, secret and tenant to access azure Key Vault.')
-
- tenant = self.credentials.get('tenant')
- if not self.credentials['tenant']:
- tenant = "common"
-
- authcredential = ServicePrincipalCredentials(
- client_id=self.credentials['client_id'],
- secret=self.credentials['secret'],
- tenant=tenant,
- cloud_environment=self._cloud_environment,
- resource="https://vault.azure.net")
-
- token = authcredential.token
- return token['token_type'], token['access_token']
-
- return KeyVaultClient(KeyVaultAuthentication(auth_callback))
-
- def get_key(self, name, version=''):
- ''' Gets an existing key '''
- key_bundle = self.client.get_key(self.keyvault_uri, name, version)
- if key_bundle:
- key_id = KeyVaultId.parse_key_id(key_bundle.key.kid)
- return key_id.id
-
- def create_key(self, name, tags, kty='RSA'):
- ''' Creates a key '''
- key_bundle = self.client.create_key(vault_base_url=self.keyvault_uri, key_name=name, kty=kty, tags=tags)
- key_id = KeyVaultId.parse_key_id(key_bundle.key.kid)
- return key_id.id
-
- def delete_key(self, name):
- ''' Deletes a key '''
- deleted_key = self.client.delete_key(self.keyvault_uri, name)
- key_id = KeyVaultId.parse_key_id(deleted_key.key.kid)
- return key_id.id
-
- def import_key(self, key_name, destination=None, key_ops=None, disabled=False, expires=None,
- not_before=None, tags=None, pem_file=None, pem_password=None, byok_file=None):
- """ Import a private key. Supports importing base64 encoded private keys from PEM files.
- Supports importing BYOK keys into HSM for premium KeyVaults. """
-
- def _to_bytes(hex_string):
- # zero pads and decodes a hex string
- if len(hex_string) % 2:
-                hex_string = '0{0}'.format(hex_string)
- return codecs.decode(hex_string, 'hex_codec')
-
- def _set_rsa_parameters(dest, src):
- # map OpenSSL parameter names to JsonWebKey property names
- conversion_dict = {
- 'modulus': 'n',
- 'publicExponent': 'e',
- 'privateExponent': 'd',
- 'prime1': 'p',
- 'prime2': 'q',
- 'exponent1': 'dp',
- 'exponent2': 'dq',
- 'coefficient': 'qi'
- }
- # regex: looks for matches that fit the following patterns:
- # integerPattern: 65537 (0x10001)
- # hexPattern:
- # 00:a0:91:4d:00:23:4a:c6:83:b2:1b:4c:15:d5:be:
- # d8:87:bd:c9:59:c2:e5:7a:f5:4a:e7:34:e8:f0:07:
- # The desired match should always be the first component of the match
- regex = re.compile(r'([^:\s]*(:[^\:)]+\))|([^:\s]*(:\s*[0-9A-Fa-f]{2})+))')
- # regex2: extracts the hex string from a format like: 65537 (0x10001)
- regex2 = re.compile(r'(?<=\(0x{1})([0-9A-Fa-f]*)(?=\))')
-
- key_params = crypto.dump_privatekey(crypto.FILETYPE_TEXT, src).decode('utf-8')
- for match in regex.findall(key_params):
- comps = match[0].split(':', 1)
- name = conversion_dict.get(comps[0], None)
- if name:
- value = comps[1].replace(' ', '').replace('\n', '').replace(':', '')
- try:
- value = _to_bytes(value)
- except Exception: # pylint:disable=broad-except
- # if decoding fails it is because of an integer pattern. Extract the hex
- # string and retry
- value = _to_bytes(regex2.findall(value)[0])
- setattr(dest, name, value)
-
- key_attrs = KeyAttributes(not disabled, not_before, expires)
- key_obj = JsonWebKey(key_ops=key_ops)
- if pem_file:
- key_obj.kty = 'RSA'
- with open(pem_file, 'r') as f:
- pem_data = f.read()
- # load private key and prompt for password if encrypted
- try:
- pem_password = str(pem_password).encode() if pem_password else None
- # despite documentation saying password should be a string, it needs to actually
- # be UTF-8 encoded bytes
- pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, pem_data, pem_password)
- except crypto.Error:
- pass # wrong password
- except TypeError:
- pass # no pass provided
- _set_rsa_parameters(key_obj, pkey)
- elif byok_file:
- with open(byok_file, 'rb') as f:
- byok_data = f.read()
- key_obj.kty = 'RSA-HSM'
- key_obj.t = byok_data
-
- return self.client.import_key(
- self.keyvault_uri, key_name, key_obj, destination == 'hsm', key_attrs, tags)
-
-
-def main():
- AzureRMKeyVaultKey()
-
-
-if __name__ == '__main__':
- main()
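
The _to_bytes helper inside import_key zero-pads odd-length hex strings before decoding them to bytes. A standalone sketch of that padding-and-decoding step, runnable outside the module (the function name is illustrative):

import codecs


def hex_to_bytes(hex_string):
    # Zero-pad an odd-length hex string, then decode it to bytes.
    if len(hex_string) % 2:
        hex_string = '0' + hex_string
    return codecs.decode(hex_string, 'hex_codec')


# 0x10001 (65537), the usual RSA public exponent, has an odd number of hex digits.
assert hex_to_bytes('10001') == b'\x01\x00\x01'
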
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_keyvaultkey_info.py b/lib/ansible/modules/cloud/azure/azure_rm_keyvaultkey_info.py
deleted file mode 100644
index 14251e8748..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_keyvaultkey_info.py
+++ /dev/null
@@ -1,466 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yunge Zhu, <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_keyvaultkey_info
-version_added: "2.9"
-short_description: Get Azure Key Vault key facts
-description:
- - Get facts of Azure Key Vault key.
-
-options:
- vault_uri:
- description:
-            - Vault URI where the key is stored.
- required: True
- type: str
- name:
- description:
- - Key name. If not set, will list all keys in I(vault_uri).
- type: str
- version:
- description:
- - Key version.
- - Set it to C(current) to show latest version of a key.
- - Set it to C(all) to list all versions of a key.
-            - Set it to a specific version to show that version of a key, for example C(fd2682392a504455b79c90dd04a1bf46).
- default: current
- type: str
- show_deleted_key:
- description:
-            - Set to C(true) to show deleted keys, or C(false) to show keys that have not been deleted.
- type: bool
- default: false
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- type: list
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yunge Zhu (@yungezz)
-
-'''
-
-EXAMPLES = '''
- - name: Get latest version of specific key
- azure_rm_keyvaultkey_info:
- vault_uri: "https://myVault.vault.azure.net"
- name: myKey
-
- - name: List all versions of specific key
- azure_rm_keyvaultkey_info:
- vault_uri: "https://myVault.vault.azure.net"
- name: myKey
- version: all
-
- - name: List specific version of specific key
- azure_rm_keyvaultkey_info:
- vault_uri: "https://myVault.vault.azure.net"
- name: myKey
- version: fd2682392a504455b79c90dd04a1bf46
-
- - name: List all keys in specific key vault
- azure_rm_keyvaultkey_info:
- vault_uri: "https://myVault.vault.azure.net"
-
- - name: List deleted keys in specific key vault
- azure_rm_keyvaultkey_info:
- vault_uri: "https://myVault.vault.azure.net"
- show_deleted_key: True
-'''
-
-RETURN = '''
-keys:
- description:
- - List of keys in Azure Key Vault.
- returned: always
- type: complex
- contains:
- kid:
- description:
- - Key identifier.
- returned: always
- type: str
- sample: "https://myVault.vault.azure.net/keys/key1/fd2682392a504455b79c90dd04a1bf46"
- permitted_operations:
- description:
- - Permitted operations on the key.
- type: list
- returned: always
- sample: encrypt
- type:
- description:
- - Key type.
- type: str
- returned: always
- sample: RSA
- version:
- description:
- - Key version.
- type: str
- returned: always
- sample: fd2682392a504455b79c90dd04a1bf46
- key:
- description:
-                - Public part of a key.
- contains:
- n:
- description:
-                        - RSA modulus.
- type: str
- e:
- description:
- - RSA public exponent.
- type: str
- crv:
- description:
- - Elliptic curve name.
- type: str
- x:
- description:
- - X component of an EC public key.
- type: str
- y:
- description:
- - Y component of an EC public key.
- type: str
- managed:
- description:
- - C(True) if the key's lifetime is managed by key vault.
- type: bool
- sample: True
- tags:
- description:
- - Tags of the key.
- returned: always
- type: list
- sample: [foo, ]
- attributes:
- description:
- - Key attributes.
- contains:
- created:
- description:
- - Creation datetime.
- returned: always
- type: str
- sample: "2019-04-25T07:26:49+00:00"
- not_before:
- description:
- - Not before datetime.
- type: str
- sample: "2019-04-25T07:26:49+00:00"
- expires:
- description:
- - Expiration datetime.
- type: str
- sample: "2019-04-25T07:26:49+00:00"
- updated:
- description:
- - Update datetime.
- returned: always
- type: str
- sample: "2019-04-25T07:26:49+00:00"
- enabled:
- description:
- - Indicate whether the key is enabled.
- returned: always
-                    type: bool
- sample: true
- recovery_level:
- description:
- - Reflects the deletion recovery level currently in effect for keys in the current vault.
- - If it contains C(Purgeable) the key can be permanently deleted by a privileged user.
- - Otherwise, only the system can purge the key, at the end of the retention interval.
- returned: always
- type: str
-                    sample: Purgeable
-'''
-
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from azure.keyvault import KeyVaultClient, KeyVaultId, KeyVaultAuthentication, KeyId
- from azure.keyvault.models import KeyAttributes, JsonWebKey
- from azure.common.credentials import ServicePrincipalCredentials
- from azure.keyvault.models.key_vault_error import KeyVaultErrorException
- from msrestazure.azure_active_directory import MSIAuthentication
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def keybundle_to_dict(bundle):
- return dict(
- tags=bundle.tags,
- managed=bundle.managed,
- attributes=dict(
- enabled=bundle.attributes.enabled,
- not_before=bundle.attributes.not_before,
- expires=bundle.attributes.expires,
- created=bundle.attributes.created,
- updated=bundle.attributes.updated,
- recovery_level=bundle.attributes.recovery_level
- ),
- kid=bundle.key.kid,
- version=KeyVaultId.parse_key_id(bundle.key.kid).version,
- type=bundle.key.kty,
- permitted_operations=bundle.key.key_ops,
- key=dict(
- n=bundle.key.n if hasattr(bundle.key, 'n') else None,
- e=bundle.key.e if hasattr(bundle.key, 'e') else None,
- crv=bundle.key.crv if hasattr(bundle.key, 'crv') else None,
- x=bundle.key.x if hasattr(bundle.key, 'x') else None,
-            y=bundle.key.y if hasattr(bundle.key, 'y') else None
- )
- )
-
-
-def deletedkeybundle_to_dict(bundle):
- keybundle = keybundle_to_dict(bundle)
-    keybundle['recovery_id'] = bundle.recovery_id
-    keybundle['scheduled_purge_date'] = bundle.scheduled_purge_date
- keybundle['deleted_date'] = bundle.deleted_date
- return keybundle
-
-
-def keyitem_to_dict(keyitem):
- return dict(
- kid=keyitem.kid,
- version=KeyVaultId.parse_key_id(keyitem.kid).version,
- tags=keyitem.tags,
-        managed=keyitem.managed,
- attributes=dict(
- enabled=keyitem.attributes.enabled,
- not_before=keyitem.attributes.not_before,
- expires=keyitem.attributes.expires,
- created=keyitem.attributes.created,
- updated=keyitem.attributes.updated,
- recovery_level=keyitem.attributes.recovery_level
- )
- )
-
-
-def deletedkeyitem_to_dict(keyitem):
- item = keyitem_to_dict(keyitem)
-    item['recovery_id'] = keyitem.recovery_id
-    item['scheduled_purge_date'] = keyitem.scheduled_purge_date
- item['deleted_date'] = keyitem.deleted_date
- return item
-
-
-class AzureRMKeyVaultKeyInfo(AzureRMModuleBase):
-
- def __init__(self):
- self.module_arg_spec = dict(
- version=dict(type='str', default='current'),
- name=dict(type='str'),
- vault_uri=dict(type='str', required=True),
- show_deleted_key=dict(type='bool', default=False),
- tags=dict(type='list')
- )
-
- self.vault_uri = None
- self.name = None
- self.version = None
- self.show_deleted_key = False
- self.tags = None
-
- self.results = dict(changed=False)
- self._client = None
-
- super(AzureRMKeyVaultKeyInfo, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=False,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
-
- self._client = self.get_keyvault_client()
-
- if self.name:
- if self.show_deleted_key:
- self.results['keys'] = self.get_deleted_key()
- else:
- if self.version == 'all':
- self.results['keys'] = self.get_key_versions()
- else:
- self.results['keys'] = self.get_key()
- else:
- if self.show_deleted_key:
- self.results['keys'] = self.list_deleted_keys()
- else:
- self.results['keys'] = self.list_keys()
-
- return self.results
-
- def get_keyvault_client(self):
- try:
- self.log("Get KeyVaultClient from MSI")
- credentials = MSIAuthentication(resource='https://vault.azure.net')
- return KeyVaultClient(credentials)
- except Exception:
- self.log("Get KeyVaultClient from service principal")
-
- # Create KeyVault Client using KeyVault auth class and auth_callback
- def auth_callback(server, resource, scope):
- if self.credentials['client_id'] is None or self.credentials['secret'] is None:
- self.fail('Please specify client_id, secret and tenant to access azure Key Vault.')
-
- tenant = self.credentials.get('tenant')
- if not self.credentials['tenant']:
- tenant = "common"
-
- authcredential = ServicePrincipalCredentials(
- client_id=self.credentials['client_id'],
- secret=self.credentials['secret'],
- tenant=tenant,
- cloud_environment=self._cloud_environment,
- resource="https://vault.azure.net")
-
- token = authcredential.token
- return token['token_type'], token['access_token']
-
- return KeyVaultClient(KeyVaultAuthentication(auth_callback))
-
- def get_key(self):
- '''
- Gets the properties of the specified key in key vault.
-
- :return: deserialized key state dictionary
- '''
- self.log("Get the key {0}".format(self.name))
-
- results = []
- try:
- if self.version == 'current':
- response = self._client.get_key(vault_base_url=self.vault_uri,
- key_name=self.name,
- key_version='')
- else:
- response = self._client.get_key(vault_base_url=self.vault_uri,
- key_name=self.name,
- key_version=self.version)
-
- if response and self.has_tags(response.tags, self.tags):
- self.log("Response : {0}".format(response))
- results.append(keybundle_to_dict(response))
-
- except KeyVaultErrorException as e:
- self.log("Did not find the key vault key {0}: {1}".format(self.name, str(e)))
- return results
-
- def get_key_versions(self):
- '''
- Lists keys versions.
-
- :return: deserialized versions of key, includes key identifier, attributes and tags
- '''
- self.log("Get the key versions {0}".format(self.name))
-
- results = []
- try:
- response = self._client.get_key_versions(vault_base_url=self.vault_uri,
- key_name=self.name)
- self.log("Response : {0}".format(response))
-
- if response:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(keyitem_to_dict(item))
- except KeyVaultErrorException as e:
- self.log("Did not find key versions {0} : {1}.".format(self.name, str(e)))
- return results
-
- def list_keys(self):
- '''
-        Lists keys in a specific key vault.
-
- :return: deserialized keys, includes key identifier, attributes and tags.
- '''
-        self.log("Get the keys in key vault {0}".format(self.vault_uri))
-
- results = []
- try:
- response = self._client.get_keys(vault_base_url=self.vault_uri)
- self.log("Response : {0}".format(response))
-
- if response:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(keyitem_to_dict(item))
- except KeyVaultErrorException as e:
-            self.log("Did not find keys in key vault {0}: {1}".format(self.vault_uri, str(e)))
- return results
-
- def get_deleted_key(self):
- '''
- Gets the properties of the specified deleted key in key vault.
-
- :return: deserialized key state dictionary
- '''
- self.log("Get the key {0}".format(self.name))
-
- results = []
- try:
- response = self._client.get_deleted_key(vault_base_url=self.vault_uri,
- key_name=self.name)
-
- if response and self.has_tags(response.tags, self.tags):
- self.log("Response : {0}".format(response))
- results.append(deletedkeybundle_to_dict(response))
-
- except KeyVaultErrorException as e:
- self.log("Did not find the key vault key {0}: {1}".format(self.name, str(e)))
- return results
-
- def list_deleted_keys(self):
- '''
-        Lists deleted keys in a specific key vault.
-
- :return: deserialized keys, includes key identifier, attributes and tags.
- '''
-        self.log("Get the deleted keys in key vault {0}".format(self.vault_uri))
-
- results = []
- try:
- response = self._client.get_deleted_keys(vault_base_url=self.vault_uri)
- self.log("Response : {0}".format(response))
-
- if response:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(deletedkeyitem_to_dict(item))
- except KeyVaultErrorException as e:
-            self.log("Did not find deleted keys in key vault {0}: {1}".format(self.vault_uri, str(e)))
- return results
-
-
-def main():
- """Main execution"""
- AzureRMKeyVaultKeyInfo()
-
-
-if __name__ == '__main__':
- main()
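
keybundle_to_dict above copies whichever JSON Web Key members are present, so RSA keys contribute n and e while elliptic-curve keys contribute crv, x and y. A plain-dict sketch of that shape-dependent mapping; here jwk is an ordinary dict standing in for bundle.key, and the function name is illustrative.

def jwk_public_parts(jwk):
    # Pick the public JWK members that apply to the key type (RSA or EC, including the -HSM variants).
    fields = ('n', 'e') if jwk.get('kty', '').startswith('RSA') else ('crv', 'x', 'y')
    return {name: jwk.get(name) for name in fields}


print(jwk_public_parts({'kty': 'RSA', 'n': '...modulus...', 'e': 'AQAB'}))
print(jwk_public_parts({'kty': 'EC', 'crv': 'P-256', 'x': 'xx', 'y': 'yy'}))
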
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_keyvaultsecret.py b/lib/ansible/modules/cloud/azure/azure_rm_keyvaultsecret.py
deleted file mode 100644
index 957bbb6c96..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_keyvaultsecret.py
+++ /dev/null
@@ -1,231 +0,0 @@
-#!/usr/bin/python
-# Copyright: Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_keyvaultsecret
-version_added: 2.5
-short_description: Use Azure KeyVault Secrets
-description:
- - Create or delete a secret within a given keyvault.
-    - By using Key Vault, you can encrypt keys and secrets, such as authentication keys, storage account keys, data encryption keys, .PFX files, and passwords.
-options:
- keyvault_uri:
- description:
- - URI of the keyvault endpoint.
- required: true
- secret_name:
- description:
- - Name of the keyvault secret.
- required: true
- secret_value:
- description:
- - Secret to be secured by keyvault.
- state:
- description:
-            - Assert the state of the secret. Use C(present) to create or update a secret and C(absent) to delete a secret.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Ian Philpot (@iphilpot)
-
-'''
-
-EXAMPLES = '''
- - name: Create a secret
- azure_rm_keyvaultsecret:
- secret_name: MySecret
- secret_value: My_Pass_Sec
- keyvault_uri: https://contoso.vault.azure.net/
- tags:
- testing: testing
- delete: never
-
- - name: Delete a secret
- azure_rm_keyvaultsecret:
- secret_name: MySecret
- keyvault_uri: https://contoso.vault.azure.net/
- state: absent
-'''
-
-RETURN = '''
-state:
- description:
- - Current state of the secret.
- returned: success
- type: complex
- contains:
- secret_id:
- description:
- - Secret resource path.
- type: str
-            sample: https://contoso.vault.azure.net/secrets/hello/e924f053839f4431b35bc54393f98423
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from azure.keyvault import KeyVaultClient, KeyVaultAuthentication, KeyVaultId
- from azure.common.credentials import ServicePrincipalCredentials
- from azure.keyvault.models.key_vault_error import KeyVaultErrorException
- from msrestazure.azure_active_directory import MSIAuthentication
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMKeyVaultSecret(AzureRMModuleBase):
- ''' Module that creates or deletes secrets in Azure KeyVault '''
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- secret_name=dict(type='str', required=True),
- secret_value=dict(type='str', no_log=True),
- keyvault_uri=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent'])
- )
-
- required_if = [
- ('state', 'present', ['secret_value'])
- ]
-
- self.results = dict(
- changed=False,
- state=dict()
- )
-
- self.secret_name = None
- self.secret_value = None
- self.keyvault_uri = None
- self.state = None
- self.data_creds = None
- self.client = None
- self.tags = None
-
- super(AzureRMKeyVaultSecret, self).__init__(self.module_arg_spec,
- supports_check_mode=True,
- required_if=required_if,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- # Create KeyVault Client
- self.client = self.get_keyvault_client()
-
- results = dict()
- changed = False
-
- try:
- results = self.get_secret(self.secret_name)
-
- # Secret exists and will be deleted
- if self.state == 'absent':
- changed = True
- elif self.secret_value and results['secret_value'] != self.secret_value:
- changed = True
-
- except KeyVaultErrorException:
- # Secret doesn't exist
- if self.state == 'present':
- changed = True
-
- self.results['changed'] = changed
- self.results['state'] = results
-
- if not self.check_mode:
- # Create secret
- if self.state == 'present' and changed:
- results['secret_id'] = self.create_update_secret(self.secret_name, self.secret_value, self.tags)
- self.results['state'] = results
- self.results['state']['status'] = 'Created'
- # Delete secret
- elif self.state == 'absent' and changed:
- results['secret_id'] = self.delete_secret(self.secret_name)
- self.results['state'] = results
- self.results['state']['status'] = 'Deleted'
- else:
- if self.state == 'present' and changed:
- self.results['state']['status'] = 'Created'
- elif self.state == 'absent' and changed:
- self.results['state']['status'] = 'Deleted'
-
- return self.results
-
- def get_keyvault_client(self):
- try:
- self.log("Get KeyVaultClient from MSI")
- credentials = MSIAuthentication(resource='https://vault.azure.net')
- return KeyVaultClient(credentials)
- except Exception:
- self.log("Get KeyVaultClient from service principal")
-
- # Create KeyVault Client using KeyVault auth class and auth_callback
- def auth_callback(server, resource, scope):
- if self.credentials['client_id'] is None or self.credentials['secret'] is None:
- self.fail('Please specify client_id, secret and tenant to access azure Key Vault.')
-
- tenant = self.credentials.get('tenant')
- if not self.credentials['tenant']:
- tenant = "common"
-
- authcredential = ServicePrincipalCredentials(
- client_id=self.credentials['client_id'],
- secret=self.credentials['secret'],
- tenant=tenant,
- cloud_environment=self._cloud_environment,
- resource="https://vault.azure.net")
-
- token = authcredential.token
- return token['token_type'], token['access_token']
-
- return KeyVaultClient(KeyVaultAuthentication(auth_callback))
-
- def get_secret(self, name, version=''):
- ''' Gets an existing secret '''
- secret_bundle = self.client.get_secret(self.keyvault_uri, name, version)
- if secret_bundle:
- secret_id = KeyVaultId.parse_secret_id(secret_bundle.id)
- return dict(secret_id=secret_id.id, secret_value=secret_bundle.value)
- return None
-
- def create_update_secret(self, name, secret, tags):
- ''' Creates/Updates a secret '''
- secret_bundle = self.client.set_secret(self.keyvault_uri, name, secret, tags)
- secret_id = KeyVaultId.parse_secret_id(secret_bundle.id)
- return secret_id.id
-
- def delete_secret(self, name):
- ''' Deletes a secret '''
- deleted_secret = self.client.delete_secret(self.keyvault_uri, name)
- secret_id = KeyVaultId.parse_secret_id(deleted_secret.id)
- return secret_id.id
-
-
-def main():
- AzureRMKeyVaultSecret()
-
-
-if __name__ == '__main__':
- main()
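
exec_module above decides changed by reading the existing secret and comparing its value with the requested one before writing anything. A condensed sketch of that get-compare-set flow, assuming a client object with the same azure.keyvault get_secret/set_secret calls used above; the helper name is illustrative.

def ensure_secret(client, vault_uri, name, value, tags=None):
    # Set the secret only when it is missing or its value differs; return (changed, secret_id).
    try:
        current = client.get_secret(vault_uri, name, '')  # '' selects the latest version
    except Exception:  # the module narrows this to KeyVaultErrorException
        current = None

    if current is not None and current.value == value:
        return False, current.id  # already in the desired state

    bundle = client.set_secret(vault_uri, name, value, tags=tags)
    return True, bundle.id

The module additionally honours check mode and records a status of Created or Deleted in its results; this sketch only covers the change detection.
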
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_loadbalancer.py b/lib/ansible/modules/cloud/azure/azure_rm_loadbalancer.py
deleted file mode 100644
index c036953a7f..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_loadbalancer.py
+++ /dev/null
@@ -1,1042 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Thomas Stringer <tomstr@microsoft.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_loadbalancer
-
-version_added: "2.4"
-
-short_description: Manage Azure load balancers
-
-description:
- - Create, update and delete Azure load balancers.
-
-options:
- resource_group:
- description:
- - Name of a resource group where the load balancer exists or will be created.
- required: true
- name:
- description:
- - Name of the load balancer.
- required: true
- state:
- description:
- - Assert the state of the load balancer. Use C(present) to create/update a load balancer, or C(absent) to delete one.
- default: present
- choices:
- - absent
- - present
- location:
- description:
- - Valid Azure location. Defaults to location of the resource group.
- sku:
- description:
- - The load balancer SKU.
- choices:
- - Basic
- - Standard
- version_added: '2.6'
- frontend_ip_configurations:
- description:
- - List of frontend IPs to be used.
- suboptions:
- name:
- description:
- - Name of the frontend ip configuration.
- required: True
- public_ip_address:
- description:
-                    - Name of an existing public IP address object in the current resource group to associate with the frontend IP configuration.
- private_ip_address:
- description:
-                    - The private IP address of the frontend IP configuration.
- version_added: '2.6'
- private_ip_allocation_method:
- description:
- - The Private IP allocation method.
- choices:
- - Static
- - Dynamic
- version_added: '2.6'
- subnet:
- description:
- - The reference of the subnet resource.
- - Should be an existing subnet's resource id.
- version_added: '2.6'
- version_added: '2.5'
- backend_address_pools:
- description:
- - List of backend address pools.
- suboptions:
- name:
- description:
- - Name of the backend address pool.
- required: True
- version_added: '2.5'
- probes:
- description:
- - List of probe definitions used to check endpoint health.
- suboptions:
- name:
- description:
- - Name of the probe.
- required: True
- port:
- description:
- - Probe port for communicating the probe. Possible values range from 1 to 65535, inclusive.
- required: True
- protocol:
- description:
- - The protocol of the end point to be probed.
- - If C(Tcp) is specified, a received ACK is required for the probe to be successful.
- - If C(Http) or C(Https) is specified, a 200 OK response from the specified URL is required for the probe to be successful.
- choices:
- - Tcp
- - Http
- - Https
- interval:
- description:
- - The interval, in seconds, for how frequently to probe the endpoint for health status.
-                    - Should be slightly less than half the allocated timeout period, which allows two full probes before taking the instance out of rotation.
- - The default value is C(15), the minimum value is C(5).
- default: 15
- fail_count:
- description:
-                    - The number of probes with no response after which further traffic stops being delivered to the endpoint.
-                    - This value allows endpoints to be taken out of rotation faster or slower than the typical times used in Azure.
- default: 3
- aliases:
- - number_of_probes
- request_path:
- description:
- - The URI used for requesting health status from the VM.
- - Path is required if I(protocol=Http) or I(protocol=Https). Otherwise, it is not allowed.
- version_added: '2.5'
- inbound_nat_pools:
- description:
- - Defines an external port range for inbound NAT to a single backend port on NICs associated with a load balancer.
- - Inbound NAT rules are created automatically for each NIC associated with the Load Balancer using an external port from this range.
- - Defining an Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound Nat rules.
- - Inbound NAT pools are referenced from virtual machine scale sets.
- - NICs that are associated with individual virtual machines cannot reference an inbound NAT pool.
- - They have to reference individual inbound NAT rules.
- suboptions:
- name:
- description:
- - Name of the inbound NAT pool.
- required: True
- frontend_ip_configuration_name:
- description:
- - A reference to frontend IP addresses.
- required: True
- protocol:
- description:
- - IP protocol for the NAT pool.
- choices:
- - Tcp
- - Udp
- - All
- frontend_port_range_start:
- description:
- - The first port in the range of external ports that will be used to provide inbound NAT to NICs associated with the load balancer.
- - Acceptable values range between 1 and 65534.
- required: True
- frontend_port_range_end:
- description:
- - The last port in the range of external ports that will be used to provide inbound NAT to NICs associated with the load balancer.
- - Acceptable values range between 1 and 65535.
- required: True
- backend_port:
- description:
- - The port used for internal connections on the endpoint.
- - Acceptable values are between 1 and 65535.
- version_added: '2.5'
- load_balancing_rules:
- description:
-            - Object collection representing the load balancing rules of the load balancer.
- suboptions:
- name:
- description:
- - Name of the load balancing rule.
- required: True
- frontend_ip_configuration:
- description:
- - A reference to frontend IP addresses.
- required: True
- backend_address_pool:
- description:
- - A reference to a pool of DIPs. Inbound traffic is randomly load balanced across IPs in the backend IPs.
- required: True
- probe:
- description:
- - The name of the load balancer probe this rule should use for health checks.
- required: True
- protocol:
- description:
- - IP protocol for the load balancing rule.
- choices:
- - Tcp
- - Udp
- - All
- load_distribution:
- description:
- - The session persistence policy for this rule; C(Default) is no persistence.
- choices:
- - Default
- - SourceIP
- - SourceIPProtocol
- default: Default
- frontend_port:
- description:
- - The port for the external endpoint.
- - Frontend port numbers must be unique across all rules within the load balancer.
- - Acceptable values are between 0 and 65534.
- - Note that value 0 enables "Any Port".
- backend_port:
- description:
- - The port used for internal connections on the endpoint.
- - Acceptable values are between 0 and 65535.
- - Note that value 0 enables "Any Port".
- idle_timeout:
- description:
- - The timeout for the TCP idle connection.
- - The value can be set between 4 and 30 minutes.
- - The default value is C(4) minutes.
- - This element is only used when the protocol is set to TCP.
- enable_floating_ip:
- description:
- - Configures SNAT for the VMs in the backend pool to use the publicIP address specified in the frontend of the load balancing rule.
- version_added: '2.5'
- inbound_nat_rules:
- description:
- - Collection of inbound NAT Rules used by a load balancer.
- - Defining inbound NAT rules on your load balancer is mutually exclusive with defining an inbound NAT pool.
- - Inbound NAT pools are referenced from virtual machine scale sets.
- - NICs that are associated with individual virtual machines cannot reference an Inbound NAT pool.
- - They have to reference individual inbound NAT rules.
- suboptions:
- name:
- description:
- - name of the inbound nat rule.
- required: True
- frontend_ip_configuration:
- description:
- - A reference to frontend IP addresses.
- required: True
- protocol:
- description:
- - IP protocol for the inbound nat rule.
- choices:
- - Tcp
- - Udp
- - All
- frontend_port:
- description:
- - The port for the external endpoint.
- - Frontend port numbers must be unique across all rules within the load balancer.
- - Acceptable values are between 0 and 65534.
- - Note that value 0 enables "Any Port".
- backend_port:
- description:
- - The port used for internal connections on the endpoint.
- - Acceptable values are between 0 and 65535.
- - Note that value 0 enables "Any Port".
- idle_timeout:
- description:
- - The timeout for the TCP idle connection.
- - The value can be set between 4 and 30 minutes.
- - The default value is C(4) minutes.
- - This element is only used when I(protocol=Tcp).
- enable_floating_ip:
- description:
- - Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group.
- - This setting is required when using the SQL AlwaysOn Availability Groups in SQL server.
- - This setting can't be changed after you create the endpoint.
- enable_tcp_reset:
- description:
- - Receive bidirectional TCP Reset on TCP flow idle timeout or unexpected connection termination.
- - This element is only used when I(protocol=Tcp).
- version_added: '2.8'
- public_ip_address_name:
- description:
-            - (deprecated) Name of an existing public IP address object to associate with the load balancer.
- - This option has been deprecated, and will be removed in 2.9. Use I(frontend_ip_configurations) instead.
- aliases:
- - public_ip_address
- - public_ip_name
- - public_ip
- probe_port:
- description:
- - (deprecated) The port that the health probe will use.
- - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
- probe_protocol:
- description:
- - (deprecated) The protocol to use for the health probe.
- - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
- choices:
- - Tcp
- - Http
- - Https
- probe_interval:
- description:
- - (deprecated) Time (in seconds) between endpoint health probes.
- - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
- default: 15
- probe_fail_count:
- description:
- - (deprecated) The amount of probe failures for the load balancer to make a health determination.
- - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
- default: 3
- probe_request_path:
- description:
- - (deprecated) The URL that an HTTP probe or HTTPS probe will use (only relevant if I(probe_protocol=Http) or I(probe_protocol=Https)).
- - This option has been deprecated, and will be removed in 2.9. Use I(probes) instead.
- protocol:
- description:
- - (deprecated) The protocol (TCP or UDP) that the load balancer will use.
- - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
- choices:
- - Tcp
- - Udp
- load_distribution:
- description:
- - (deprecated) The type of load distribution that the load balancer will employ.
- - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
- choices:
- - Default
- - SourceIP
- - SourceIPProtocol
- frontend_port:
- description:
- - (deprecated) Frontend port that will be exposed for the load balancer.
- - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
- backend_port:
- description:
- - (deprecated) Backend port that will be exposed for the load balancer.
- - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
- idle_timeout:
- description:
- - (deprecated) Timeout for TCP idle connection in minutes.
- - This option has been deprecated, and will be removed in 2.9. Use I(load_balancing_rules) instead.
- default: 4
- natpool_frontend_port_start:
- description:
- - (deprecated) Start of the port range for a NAT pool.
- - This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead.
- natpool_frontend_port_end:
- description:
- - (deprecated) End of the port range for a NAT pool.
- - This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead.
- natpool_backend_port:
- description:
- - (deprecated) Backend port used by the NAT pool.
- - This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead.
- natpool_protocol:
- description:
- - (deprecated) The protocol for the NAT pool.
- - This option has been deprecated, and will be removed in 2.9. Use I(inbound_nat_pools) instead.
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Thomas Stringer (@trstringer)
- - Yuwei Zhou (@yuwzho)
-'''
-
-EXAMPLES = '''
-- name: create load balancer
- azure_rm_loadbalancer:
- resource_group: myResourceGroup
- name: testloadbalancer1
- frontend_ip_configurations:
- - name: frontendipconf0
- public_ip_address: testpip
- backend_address_pools:
- - name: backendaddrpool0
- probes:
- - name: prob0
- port: 80
- inbound_nat_pools:
- - name: inboundnatpool0
- frontend_ip_configuration_name: frontendipconf0
- protocol: Tcp
- frontend_port_range_start: 80
- frontend_port_range_end: 81
- backend_port: 8080
- load_balancing_rules:
- - name: lbrbalancingrule0
- frontend_ip_configuration: frontendipconf0
- backend_address_pool: backendaddrpool0
- frontend_port: 80
- backend_port: 80
- probe: prob0
- inbound_nat_rules:
- - name: inboundnatrule0
- backend_port: 8080
- protocol: Tcp
- frontend_port: 8080
- frontend_ip_configuration: frontendipconf0
-'''
-
-RETURN = '''
-state:
- description:
- - Current state of the load balancer.
- returned: always
- type: dict
-changed:
- description:
- - Whether or not the resource has changed.
- returned: always
- type: bool
-'''
-
-import random
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
-from ansible.module_utils._text import to_native
-try:
- from msrestazure.tools import parse_resource_id
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-frontend_ip_configuration_spec = dict(
- name=dict(
- type='str',
- required=True
- ),
- public_ip_address=dict(
- type='str'
- ),
- private_ip_address=dict(
- type='str'
- ),
- private_ip_allocation_method=dict(
- type='str'
- ),
- subnet=dict(
- type='str'
- )
-)
-
-
-backend_address_pool_spec = dict(
- name=dict(
- type='str',
- required=True
- )
-)
-
-
-probes_spec = dict(
- name=dict(
- type='str',
- required=True
- ),
- port=dict(
- type='int',
- required=True
- ),
- protocol=dict(
- type='str',
- choices=['Tcp', 'Http', 'Https']
- ),
- interval=dict(
- type='int',
- default=15
- ),
- fail_count=dict(
- type='int',
- default=3,
- aliases=['number_of_probes']
- ),
- request_path=dict(
- type='str'
- )
-)
-
-
-inbound_nat_pool_spec = dict(
- name=dict(
- type='str',
- required=True
- ),
- frontend_ip_configuration_name=dict(
- type='str',
- required=True
- ),
- protocol=dict(
- type='str',
- choices=['Tcp', 'Udp', 'All']
- ),
- frontend_port_range_start=dict(
- type='int',
- required=True
- ),
- frontend_port_range_end=dict(
- type='int',
- required=True
- ),
- backend_port=dict(
- type='int',
- required=True
- )
-)
-
-
-inbound_nat_rule_spec = dict(
- name=dict(
- type='str',
- required=True
- ),
- frontend_ip_configuration=dict(
- type='str',
- required=True
- ),
- protocol=dict(
- type='str',
- choices=['Tcp', 'Udp', 'All']
- ),
- frontend_port=dict(
- type='int',
- required=True
- ),
- idle_timeout=dict(
- type='int'
- ),
- backend_port=dict(
- type='int',
- required=True
- ),
- enable_floating_ip=dict(
- type='bool'
- ),
- enable_tcp_reset=dict(
- type='bool'
- )
-)
-
-
-load_balancing_rule_spec = dict(
- name=dict(
- type='str',
- required=True
- ),
- frontend_ip_configuration=dict(
- type='str',
- required=True
- ),
- backend_address_pool=dict(
- type='str',
- required=True
- ),
- probe=dict(
- type='str',
- required=True
- ),
- protocol=dict(
- type='str',
- choices=['Tcp', 'Udp', 'All']
- ),
- load_distribution=dict(
- type='str',
- choices=['Default', 'SourceIP', 'SourceIPProtocol'],
- default='Default'
- ),
- frontend_port=dict(
- type='int',
- required=True
- ),
- backend_port=dict(
- type='int'
- ),
- idle_timeout=dict(
- type='int',
- default=4
- ),
- enable_floating_ip=dict(
- type='bool'
- )
-)
-
-
-class AzureRMLoadBalancer(AzureRMModuleBase):
- """Configuration class for an Azure RM load balancer resource"""
-
- def __init__(self):
- self.module_args = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- ),
- location=dict(
- type='str'
- ),
- sku=dict(
- type='str',
- choices=['Basic', 'Standard']
- ),
- frontend_ip_configurations=dict(
- type='list',
- elements='dict',
- options=frontend_ip_configuration_spec
- ),
- backend_address_pools=dict(
- type='list',
- elements='dict',
- options=backend_address_pool_spec
- ),
- probes=dict(
- type='list',
- elements='dict',
- options=probes_spec
- ),
- inbound_nat_rules=dict(
- type='list',
- elements='dict',
- options=inbound_nat_rule_spec
- ),
- inbound_nat_pools=dict(
- type='list',
- elements='dict',
- options=inbound_nat_pool_spec
- ),
- load_balancing_rules=dict(
- type='list',
- elements='dict',
- options=load_balancing_rule_spec
- ),
- public_ip_address_name=dict(
- type='str',
- aliases=['public_ip_address', 'public_ip_name', 'public_ip']
- ),
- probe_port=dict(
- type='int'
- ),
- probe_protocol=dict(
- type='str',
- choices=['Tcp', 'Http', 'Https']
- ),
- probe_interval=dict(
- type='int',
- default=15
- ),
- probe_fail_count=dict(
- type='int',
- default=3
- ),
- probe_request_path=dict(
- type='str'
- ),
- protocol=dict(
- type='str',
- choices=['Tcp', 'Udp']
- ),
- load_distribution=dict(
- type='str',
- choices=['Default', 'SourceIP', 'SourceIPProtocol']
- ),
- frontend_port=dict(
- type='int'
- ),
- backend_port=dict(
- type='int'
- ),
- idle_timeout=dict(
- type='int',
- default=4
- ),
- natpool_frontend_port_start=dict(
- type='int'
- ),
- natpool_frontend_port_end=dict(
- type='int'
- ),
- natpool_backend_port=dict(
- type='int'
- ),
- natpool_protocol=dict(
- type='str'
- )
- )
-
- self.resource_group = None
- self.name = None
- self.location = None
- self.sku = None
- self.frontend_ip_configurations = None
- self.backend_address_pools = None
- self.probes = None
- self.inbound_nat_rules = None
- self.inbound_nat_pools = None
- self.load_balancing_rules = None
- self.public_ip_address_name = None
- self.state = None
- self.probe_port = None
- self.probe_protocol = None
- self.probe_interval = None
- self.probe_fail_count = None
- self.probe_request_path = None
- self.protocol = None
- self.load_distribution = None
- self.frontend_port = None
- self.backend_port = None
- self.idle_timeout = None
- self.natpool_frontend_port_start = None
- self.natpool_frontend_port_end = None
- self.natpool_backend_port = None
- self.natpool_protocol = None
- self.tags = None
-
- self.results = dict(changed=False, state=dict())
-
- super(AzureRMLoadBalancer, self).__init__(
- derived_arg_spec=self.module_args,
- supports_check_mode=True
- )
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
- for key in list(self.module_args.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- changed = False
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- load_balancer = self.get_load_balancer()
-
- if self.state == 'present':
- # compatible parameters
- is_compatible_param = not self.frontend_ip_configurations and not self.backend_address_pools and not self.probes and not self.inbound_nat_pools
- is_compatible_param = is_compatible_param and not load_balancer # the instance should not exist
- is_compatible_param = is_compatible_param or self.public_ip_address_name or self.probe_protocol or self.natpool_protocol or self.protocol
- if is_compatible_param:
- self.deprecate('Discrete load balancer config settings are deprecated and will be removed.'
- ' Use frontend_ip_configurations, backend_address_pools, probes, inbound_nat_pools lists instead.', version='2.9')
- frontend_ip_name = 'frontendip0'
- backend_address_pool_name = 'backendaddrp0'
- prob_name = 'prob0'
- inbound_nat_pool_name = 'inboundnatp0'
- lb_rule_name = 'lbr'
- self.frontend_ip_configurations = [dict(
- name=frontend_ip_name,
- public_ip_address=self.public_ip_address_name
- )]
- self.backend_address_pools = [dict(
- name=backend_address_pool_name
- )]
- self.probes = [dict(
- name=prob_name,
- port=self.probe_port,
- protocol=self.probe_protocol,
- interval=self.probe_interval,
- fail_count=self.probe_fail_count,
- request_path=self.probe_request_path
- )] if self.probe_protocol else None
- self.inbound_nat_pools = [dict(
- name=inbound_nat_pool_name,
- frontend_ip_configuration_name=frontend_ip_name,
- protocol=self.natpool_protocol,
- frontend_port_range_start=self.natpool_frontend_port_start,
- frontend_port_range_end=self.natpool_frontend_port_end,
- backend_port=self.natpool_backend_port
- )] if self.natpool_protocol else None
- self.load_balancing_rules = [dict(
- name=lb_rule_name,
- frontend_ip_configuration=frontend_ip_name,
- backend_address_pool=backend_address_pool_name,
- probe=prob_name,
- protocol=self.protocol,
- load_distribution=self.load_distribution,
- frontend_port=self.frontend_port,
- backend_port=self.backend_port,
- idle_timeout=self.idle_timeout,
- enable_floating_ip=False
- )] if self.protocol else None
-
- # create new load balancer structure early, so it can be easily compared
- frontend_ip_configurations_param = [self.network_models.FrontendIPConfiguration(
- name=item.get('name'),
- public_ip_address=self.get_public_ip_address_instance(item.get('public_ip_address')) if item.get('public_ip_address') else None,
- private_ip_address=item.get('private_ip_address'),
- private_ip_allocation_method=item.get('private_ip_allocation_method'),
- subnet=self.network_models.Subnet(id=item.get('subnet')) if item.get('subnet') else None
- ) for item in self.frontend_ip_configurations] if self.frontend_ip_configurations else None
-
- backend_address_pools_param = [self.network_models.BackendAddressPool(
- name=item.get('name')
- ) for item in self.backend_address_pools] if self.backend_address_pools else None
-
- probes_param = [self.network_models.Probe(
- name=item.get('name'),
- port=item.get('port'),
- protocol=item.get('protocol'),
- interval_in_seconds=item.get('interval'),
- request_path=item.get('request_path'),
- number_of_probes=item.get('fail_count')
- ) for item in self.probes] if self.probes else None
-
- inbound_nat_pools_param = [self.network_models.InboundNatPool(
- name=item.get('name'),
- frontend_ip_configuration=self.network_models.SubResource(
- id=frontend_ip_configuration_id(
- self.subscription_id,
- self.resource_group,
- self.name,
- item.get('frontend_ip_configuration_name'))),
- protocol=item.get('protocol'),
- frontend_port_range_start=item.get('frontend_port_range_start'),
- frontend_port_range_end=item.get('frontend_port_range_end'),
- backend_port=item.get('backend_port')
- ) for item in self.inbound_nat_pools] if self.inbound_nat_pools else None
-
- load_balancing_rules_param = [self.network_models.LoadBalancingRule(
- name=item.get('name'),
- frontend_ip_configuration=self.network_models.SubResource(
- id=frontend_ip_configuration_id(
- self.subscription_id,
- self.resource_group,
- self.name,
- item.get('frontend_ip_configuration')
- )
- ),
- backend_address_pool=self.network_models.SubResource(
- id=backend_address_pool_id(
- self.subscription_id,
- self.resource_group,
- self.name,
- item.get('backend_address_pool')
- )
- ),
- probe=self.network_models.SubResource(
- id=probe_id(
- self.subscription_id,
- self.resource_group,
- self.name,
- item.get('probe')
- )
- ),
- protocol=item.get('protocol'),
- load_distribution=item.get('load_distribution'),
- frontend_port=item.get('frontend_port'),
- backend_port=item.get('backend_port'),
- idle_timeout_in_minutes=item.get('idle_timeout'),
- enable_floating_ip=item.get('enable_floating_ip')
- ) for item in self.load_balancing_rules] if self.load_balancing_rules else None
-
- inbound_nat_rules_param = [self.network_models.InboundNatRule(
- name=item.get('name'),
- frontend_ip_configuration=self.network_models.SubResource(
- id=frontend_ip_configuration_id(
- self.subscription_id,
- self.resource_group,
- self.name,
- item.get('frontend_ip_configuration')
- )
- ) if item.get('frontend_ip_configuration') else None,
- protocol=item.get('protocol'),
- frontend_port=item.get('frontend_port'),
- backend_port=item.get('backend_port'),
- idle_timeout_in_minutes=item.get('idle_timeout'),
- enable_tcp_reset=item.get('enable_tcp_reset'),
- enable_floating_ip=item.get('enable_floating_ip')
- ) for item in self.inbound_nat_rules] if self.inbound_nat_rules else None
-
- # construct the new instance, if the parameter is none, keep remote one
- self.new_load_balancer = self.network_models.LoadBalancer(
- sku=self.network_models.LoadBalancerSku(name=self.sku) if self.sku else None,
- location=self.location,
- tags=self.tags,
- frontend_ip_configurations=frontend_ip_configurations_param,
- backend_address_pools=backend_address_pools_param,
- probes=probes_param,
- inbound_nat_pools=inbound_nat_pools_param,
- load_balancing_rules=load_balancing_rules_param,
- inbound_nat_rules=inbound_nat_rules_param
- )
-
- self.new_load_balancer = self.assign_protocol(self.new_load_balancer, load_balancer)
-
- if load_balancer:
- self.new_load_balancer = self.object_assign(self.new_load_balancer, load_balancer)
- load_balancer_dict = load_balancer.as_dict()
- new_dict = self.new_load_balancer.as_dict()
- if not default_compare(new_dict, load_balancer_dict, ''):
- changed = True
- else:
- changed = False
- else:
- changed = True
- elif self.state == 'absent' and load_balancer:
- changed = True
-
- self.results['state'] = load_balancer.as_dict() if load_balancer else {}
- if 'tags' in self.results['state']:
- update_tags, self.results['state']['tags'] = self.update_tags(self.results['state']['tags'])
- if update_tags:
- changed = True
- else:
- if self.tags:
- changed = True
- self.results['changed'] = changed
-
- if self.state == 'present' and changed:
- self.results['state'] = self.create_or_update_load_balancer(self.new_load_balancer).as_dict()
- elif self.state == 'absent' and changed:
- self.delete_load_balancer()
- self.results['state'] = None
-
- return self.results
-
- def get_public_ip_address_instance(self, id):
- """Get a reference to the public ip address resource"""
- self.log('Fetching public ip address {0}'.format(id))
- resource_id = format_resource_id(id, self.subscription_id, 'Microsoft.Network', 'publicIPAddresses', self.resource_group)
- return self.network_models.PublicIPAddress(id=resource_id)
-
- def get_load_balancer(self):
- """Get a load balancer"""
- self.log('Fetching loadbalancer {0}'.format(self.name))
- try:
- return self.network_client.load_balancers.get(self.resource_group, self.name)
- except CloudError:
- return None
-
- def delete_load_balancer(self):
- """Delete a load balancer"""
- self.log('Deleting loadbalancer {0}'.format(self.name))
- try:
- poller = self.network_client.load_balancers.delete(self.resource_group, self.name)
- return self.get_poller_result(poller)
- except CloudError as exc:
- self.fail("Error deleting loadbalancer {0} - {1}".format(self.name, str(exc)))
-
- def create_or_update_load_balancer(self, param):
- try:
- poller = self.network_client.load_balancers.create_or_update(self.resource_group, self.name, param)
- new_lb = self.get_poller_result(poller)
- return new_lb
- except CloudError as exc:
- self.fail("Error creating or updating load balancer {0} - {1}".format(self.name, str(exc)))
-
- def object_assign(self, patch, origin):
- attribute_map = set(self.network_models.LoadBalancer._attribute_map.keys()) - set(self.network_models.LoadBalancer._validation.keys())
- for key in attribute_map:
- if not getattr(patch, key):
- setattr(patch, key, getattr(origin, key))
- return patch
-
- def assign_protocol(self, patch, origin):
- attribute_map = ['probes', 'inbound_nat_rules', 'inbound_nat_pools', 'load_balancing_rules']
- for attribute in attribute_map:
- properties = getattr(patch, attribute)
- if not properties:
- continue
- references = getattr(origin, attribute) if origin else []
- for item in properties:
- if item.protocol:
- continue
- refs = [x for x in references if to_native(x.name) == item.name]
- ref = refs[0] if len(refs) > 0 else None
- item.protocol = ref.protocol if ref else 'Tcp'
- return patch
-
-
-def default_compare(new, old, path):
- if isinstance(new, dict):
- if not isinstance(old, dict):
- return False
- for k in new.keys():
- if not default_compare(new.get(k), old.get(k, None), path + '/' + k):
- return False
- return True
- elif isinstance(new, list):
- if not isinstance(old, list) or len(new) != len(old):
- return False
- if len(old) == 0:
- return True
- if isinstance(old[0], dict):
- key = None
- if 'id' in old[0] and 'id' in new[0]:
- key = 'id'
- elif 'name' in old[0] and 'name' in new[0]:
- key = 'name'
- new = sorted(new, key=lambda x: x.get(key, None))
- old = sorted(old, key=lambda x: x.get(key, None))
- else:
- new = sorted(new)
- old = sorted(old)
- for i in range(len(new)):
- if not default_compare(new[i], old[i], path + '/*'):
- return False
- return True
- else:
- return new == old
-
-
-def frontend_ip_configuration_id(subscription_id, resource_group_name, load_balancer_name, name):
- """Generate the id for a frontend ip configuration"""
- return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/loadBalancers/{2}/frontendIPConfigurations/{3}'.format(
- subscription_id,
- resource_group_name,
- load_balancer_name,
- name
- )
-
-
-def backend_address_pool_id(subscription_id, resource_group_name, load_balancer_name, name):
- """Generate the id for a backend address pool"""
- return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/loadBalancers/{2}/backendAddressPools/{3}'.format(
- subscription_id,
- resource_group_name,
- load_balancer_name,
- name
- )
-
-
-def probe_id(subscription_id, resource_group_name, load_balancer_name, name):
- """Generate the id for a probe"""
- return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/loadBalancers/{2}/probes/{3}'.format(
- subscription_id,
- resource_group_name,
- load_balancer_name,
- name
- )
-
-
-def main():
- """Main execution"""
- AzureRMLoadBalancer()
-
-
-if __name__ == '__main__':
- main()
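For reference, a minimal standalone sketch of the ARM resource IDs the *_id helpers above assemble, using a placeholder subscription ID and resource group (both illustrative only, not values from this module):

def probe_id(subscription_id, resource_group_name, load_balancer_name, name):
    """Build the ARM resource ID of a load balancer probe (same layout as the helper above)."""
    return ('/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network'
            '/loadBalancers/{2}/probes/{3}').format(
        subscription_id, resource_group_name, load_balancer_name, name)


if __name__ == '__main__':
    # Placeholder values; real IDs use the caller's subscription and resource group.
    print(probe_id('00000000-0000-0000-0000-000000000000', 'myResourceGroup', 'myLb', 'prob0'))
    # /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myResourceGroup
    #     /providers/Microsoft.Network/loadBalancers/myLb/probes/prob0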
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_loadbalancer_info.py b/lib/ansible/modules/cloud/azure/azure_rm_loadbalancer_info.py
deleted file mode 100644
index af4507c93f..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_loadbalancer_info.py
+++ /dev/null
@@ -1,177 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Thomas Stringer <tomstr@microsoft.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_loadbalancer_info
-
-version_added: "2.9"
-
-short_description: Get load balancer facts
-
-description:
- - Get facts for a specific load balancer or all load balancers.
-
-options:
- name:
- description:
- - Limit results to a specific load balancer.
- resource_group:
- description:
- - The resource group to search for the desired load balancer.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Thomas Stringer (@trstringer)
-'''
-
-EXAMPLES = '''
- - name: Get facts for one load balancer
- azure_rm_loadbalancer_info:
- name: Testing
- resource_group: myResourceGroup
-
- - name: Get facts for all load balancers
- azure_rm_loadbalancer_info:
-
- - name: Get facts for all load balancers in a specific resource group
- azure_rm_loadbalancer_info:
- resource_group: myResourceGroup
-
- - name: Get facts by tags
- azure_rm_loadbalancer_info:
- tags:
- - testing
-'''
-
-RETURN = '''
-azure_loadbalancers:
- description:
- - List of load balancer dicts.
- returned: always
- type: list
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.common import AzureHttpError
-except Exception:
- # handled in azure_rm_common
- pass
-
-AZURE_OBJECT_CLASS = 'LoadBalancer'
-
-
-class AzureRMLoadBalancerInfo(AzureRMModuleBase):
- """Utility class to get load balancer facts"""
-
- def __init__(self):
-
- self.module_args = dict(
- name=dict(type='str'),
- resource_group=dict(type='str'),
- tags=dict(type='list')
- )
-
- self.results = dict(
- changed=False,
- ansible_info=dict(
- azure_loadbalancers=[]
- )
- )
-
- self.name = None
- self.resource_group = None
- self.tags = None
-
- super(AzureRMLoadBalancerInfo, self).__init__(
- derived_arg_spec=self.module_args,
- supports_tags=False,
- facts_module=True
- )
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_loadbalancer_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_loadbalancer_facts' module has been renamed to 'azure_rm_loadbalancer_info'", version='2.13')
-
- for key in self.module_args:
- setattr(self, key, kwargs[key])
-
- self.results['ansible_info']['azure_loadbalancers'] = (
- self.get_item() if self.name
- else self.list_items()
- )
-
- return self.results
-
- def get_item(self):
- """Get a single load balancer"""
-
- self.log('Get properties for {0}'.format(self.name))
-
- item = None
- result = []
-
- try:
- item = self.network_client.load_balancers.get(self.resource_group, self.name)
- except CloudError:
- pass
-
- if item and self.has_tags(item.tags, self.tags):
- result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
-
- return result
-
- def list_items(self):
- """Get all load balancers"""
-
- self.log('List all load balancers')
-
- if self.resource_group:
- try:
- response = self.network_client.load_balancers.list(self.resource_group)
- except AzureHttpError as exc:
- self.fail('Failed to list items in resource group {0} - {1}'.format(self.resource_group, str(exc)))
- else:
- try:
- response = self.network_client.load_balancers.list_all()
- except AzureHttpError as exc:
- self.fail('Failed to list all items - {0}'.format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
-
- return results
-
-
-def main():
- """Main module execution code path"""
-
- AzureRMLoadBalancerInfo()
-
-
-if __name__ == '__main__':
- main()
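As a side note, a hedged sketch of how the 'key' / 'key:value' tag filter documented above could behave; the real has_tags implementation lives in azure_rm_common and may differ in detail:

def matches_tag_filters(resource_tags, tag_filters):
    """Return True if the resource's tags satisfy every 'key' or 'key:value' filter."""
    resource_tags = resource_tags or {}
    for spec in tag_filters or []:
        key, _, value = spec.partition(':')
        if key not in resource_tags:
            return False
        if value and resource_tags[key] != value:
            return False
    return True


# e.g. matches_tag_filters({'env': 'prod'}, ['env:prod'])  -> True
#      matches_tag_filters({'env': 'prod'}, ['testing'])   -> False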
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_lock.py b/lib/ansible/modules/cloud/azure/azure_rm_lock.py
deleted file mode 100644
index 88cc5bf3af..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_lock.py
+++ /dev/null
@@ -1,216 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_lock
-version_added: "2.9"
-short_description: Manage Azure locks
-description:
- - Create, delete an Azure lock.
- - To create or delete management locks, you must have access to Microsoft.Authorization/* or Microsoft.Authorization/locks/* actions.
- - Of the built-in roles, only Owner and User Access Administrator are granted those actions.
-options:
- name:
- description:
- - Name of the lock.
- type: str
- required: true
- managed_resource_id:
- description:
- - Manage a lock for the specified resource ID.
- - Mutually exclusive with I(resource_group).
- - If neither I(managed_resource_id) nor I(resource_group) is specified, manage a lock for the current subscription.
- - "'/subscriptions/{subscriptionId}' for subscriptions."
- - "'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups."
- - "'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{namespace}/{resourceType}/{resourceName}' for resources."
- type: str
- resource_group:
- description:
- - Manage a lock for the named resource group.
- - Mutually exclusive with I(managed_resource_id).
- - If neither I(managed_resource_id) nor I(resource_group) is specified, manage a lock for the current subscription.
- type: str
- state:
- description:
- - State of the lock.
- - Use C(present) to create or update a lock and C(absent) to delete a lock.
- type: str
- default: present
- choices:
- - absent
- - present
- level:
- description:
- - The lock level type.
- type: str
- choices:
- - can_not_delete
- - read_only
-extends_documentation_fragment:
- - azure
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
-- name: Create a lock for a resource
- azure_rm_lock:
- managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM
- name: myLock
- level: read_only
-
-- name: Create a lock for a resource group
- azure_rm_lock:
- managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup
- name: myLock
- level: read_only
-
-- name: Create a lock for a resource group
- azure_rm_lock:
- resource_group: myResourceGroup
- name: myLock
- level: read_only
-
-- name: Create a lock for a subscription
- azure_rm_lock:
- name: myLock
- level: read_only
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID of the lock.
- returned: success
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Authorization/locks/keep"
-''' # NOQA
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMLock(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- resource_group=dict(type='str'),
- managed_resource_id=dict(type='str'),
- level=dict(type='str', choices=['can_not_delete', 'read_only'])
- )
-
- self.results = dict(
- changed=False,
- id=None
- )
-
- required_if = [
- ('state', 'present', ['level'])
- ]
-
- mutually_exclusive = [['resource_group', 'managed_resource_id']]
-
- self.name = None
- self.state = None
- self.level = None
- self.resource_group = None
- self.managed_resource_id = None
-
- super(AzureRMLock, self).__init__(self.module_arg_spec,
- supports_check_mode=True,
- required_if=required_if,
- mutually_exclusive=mutually_exclusive,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
-
- for key in self.module_arg_spec.keys():
- setattr(self, key, kwargs[key])
-
- changed = False
- # construct scope id
- scope = self.get_scope()
- lock = self.get_lock(scope)
- if self.state == 'present':
- lock_level = getattr(self.lock_models.LockLevel, self.level)
- if not lock:
- changed = True
- lock = self.lock_models.ManagementLockObject(level=lock_level)
- elif lock.level != lock_level:
- self.log('Lock level changed')
- lock.level = lock_level
- changed = True
- if not self.check_mode:
- lock = self.create_or_update_lock(scope, lock)
- self.results['id'] = lock.id
- elif lock:
- changed = True
- if not self.check_mode:
- self.delete_lock(scope)
- self.results['changed'] = changed
- return self.results
-
- def delete_lock(self, scope):
- try:
- return self.lock_client.management_locks.delete_by_scope(scope, self.name)
- except CloudError as exc:
- self.fail('Error when deleting lock {0} for {1}: {2}'.format(self.name, scope, exc.message))
-
- def create_or_update_lock(self, scope, lock):
- try:
- return self.lock_client.management_locks.create_or_update_by_scope(scope, self.name, lock)
- except CloudError as exc:
- self.fail('Error when creating or updating lock {0} for {1}: {2}'.format(self.name, scope, exc.message))
-
- def get_lock(self, scope):
- try:
- return self.lock_client.management_locks.get_by_scope(scope, self.name)
- except CloudError as exc:
- if exc.status_code in [404]:
- return None
- self.fail('Error when getting lock {0} for {1}: {2}'.format(self.name, scope, exc.message))
-
- def get_scope(self):
- '''
- Get the resource scope of the lock management.
- '/subscriptions/{subscriptionId}' for subscriptions,
- '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups,
- '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{namespace}/{resourceType}/{resourceName}' for resources.
- '''
- if self.managed_resource_id:
- return self.managed_resource_id
- elif self.resource_group:
- return '/subscriptions/{0}/resourcegroups/{1}'.format(self.subscription_id, self.resource_group)
- else:
- return '/subscriptions/{0}'.format(self.subscription_id)
-
-
-def main():
- AzureRMLock()
-
-
-if __name__ == '__main__':
- main()
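A standalone restatement of the scope resolution performed by get_scope() above (an explicit resource ID wins, then a resource group, then the whole subscription); the subscription ID in the usage comment is a placeholder:

def lock_scope(subscription_id, resource_group=None, managed_resource_id=None):
    """Resolve the scope a management lock applies to."""
    if managed_resource_id:
        return managed_resource_id
    if resource_group:
        return '/subscriptions/{0}/resourcegroups/{1}'.format(subscription_id, resource_group)
    return '/subscriptions/{0}'.format(subscription_id)


# lock_scope('xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx', resource_group='myResourceGroup')
# -> '/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup'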
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_lock_info.py b/lib/ansible/modules/cloud/azure/azure_rm_lock_info.py
deleted file mode 100644
index d0761d2175..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_lock_info.py
+++ /dev/null
@@ -1,223 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_lock_info
-version_added: "2.9"
-short_description: Get Azure lock facts
-description:
- - Get facts of Azure management locks.
-options:
- name:
- description:
- - Name of the lock.
- type: str
- managed_resource_id:
- description:
- - ID of the resource whose locks should be queried.
- - Get this from the corresponding facts or info module.
- - Mutually exclusive with I(resource_group).
- - Queries the subscription if neither I(managed_resource_id) nor I(resource_group) is defined.
- - "'/subscriptions/{subscriptionId}' for subscriptions."
- - "'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups."
- - "'/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{namespace}/{resourceType}/{resourceName}' for resources."
- - Returns all locks in the child scope of this resource; use the I(managed_resource_id) in the response for further management.
- type: str
- resource_group:
- description:
- - Name of the resource group whose locks should be queried.
- - The lock scope is at the resource group level.
- - Mutually exclusive with I(managed_resource_id).
- - Queries the subscription if neither I(managed_resource_id) nor I(resource_group) is defined.
- - Returns all locks in the child scope of this resource group; use the I(managed_resource_id) in the response for further management.
- type: str
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
-- name: Get myLock details of myVM
- azure_rm_lock_info:
- name: myLock
- managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM
-
-- name: List locks of myVM
- azure_rm_lock_info:
- managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVM
-
-- name: List locks of myResourceGroup
- azure_rm_lock_info:
- resource_group: myResourceGroup
-
-- name: List locks of myResourceGroup
- azure_rm_lock_info:
- managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup
-
-- name: List locks of mySubscription
- azure_rm_lock_info:
-
-- name: List locks of mySubscription
- azure_rm_lock_info:
- managed_resource_id: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-'''
-
-RETURN = '''
-locks:
- description:
- - List of locks dicts.
- returned: always
- type: complex
- contains:
- id:
- description:
- - ID of the Lock.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Authorization/locks/myLock"
- name:
- description:
- - Name of the lock.
- returned: always
- type: str
- sample: myLock
- level:
- description:
- - Type level of the lock.
- returned: always
- type: str
- sample: can_not_delete
- notes:
- description:
- - Notes of the lock added by creator.
- returned: always
- type: str
- sample: "This is a lock"
-''' # NOQA
-
-import json
-import re
-from ansible.module_utils.common.dict_transformations import _camel_to_snake
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.azure_rm_common_rest import GenericRestClient
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMLockInfo(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str'),
- resource_group=dict(type='str'),
- managed_resource_id=dict(type='str')
- )
-
- self.results = dict(
- changed=False,
- locks=[]
- )
-
- mutually_exclusive = [['resource_group', 'managed_resource_id']]
-
- self.name = None
- self.resource_group = None
- self.managed_resource_id = None
- self._mgmt_client = None
- self._query_parameters = {'api-version': '2016-09-01'}
- self._header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
-
- super(AzureRMLockInfo, self).__init__(self.module_arg_spec, facts_module=True, mutually_exclusive=mutually_exclusive, supports_tags=False)
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_lock_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_lock_facts' module has been renamed to 'azure_rm_lock_info'", version='2.13')
-
- for key in self.module_arg_spec.keys():
- setattr(self, key, kwargs[key])
-
- self._mgmt_client = self.get_mgmt_svc_client(GenericRestClient, base_url=self._cloud_environment.endpoints.resource_manager)
- changed = False
- # construct scope id
- scope = self.get_scope()
- url = '/{0}/providers/Microsoft.Authorization/locks'.format(scope)
- if self.name:
- url = '{0}/{1}'.format(url, self.name)
- locks = self.list_locks(url)
- resp = locks.get('value') if 'value' in locks else [locks]
- self.results['locks'] = [self.to_dict(x) for x in resp]
- return self.results
-
- def to_dict(self, lock):
- resp = dict(
- id=lock['id'],
- name=lock['name'],
- level=_camel_to_snake(lock['properties']['level']),
- managed_resource_id=re.sub('/providers/Microsoft.Authorization/locks/.+', '', lock['id'])
- )
- if lock['properties'].get('notes'):
- resp['notes'] = lock['properties']['notes']
- if lock['properties'].get('owners'):
- resp['owners'] = [x['application_id'] for x in lock['properties']['owners']]
- return resp
-
- def list_locks(self, url):
- try:
- resp = self._mgmt_client.query(url=url,
- method='GET',
- query_parameters=self._query_parameters,
- header_parameters=self._header_parameters,
- body=None,
- expected_status_codes=[200],
- polling_timeout=None,
- polling_interval=None)
- return json.loads(resp.text)
- except CloudError as exc:
- self.fail('Error when finding locks {0}: {1}'.format(url, exc.message))
-
- def get_scope(self):
- '''
- Get the resource scope of the lock management.
- '/subscriptions/{subscriptionId}' for subscriptions,
- '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}' for resource groups,
- '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{namespace}/{resourceType}/{resourceName}' for resources.
- '''
- if self.managed_resource_id:
- return self.managed_resource_id
- elif self.resource_group:
- return '/subscriptions/{0}/resourcegroups/{1}'.format(self.subscription_id, self.resource_group)
- else:
- return '/subscriptions/{0}'.format(self.subscription_id)
-
-
-def main():
- AzureRMLockInfo()
-
-
-if __name__ == '__main__':
- main()
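A hedged sketch of the flattening done in to_dict() above; the camel-to-snake conversion here is a simple regex approximation of _camel_to_snake, and the sample payload is illustrative only:

import re


def lock_to_dict(lock):
    """Flatten a raw lock payload into id, name, snake_case level and parent scope."""
    return dict(
        id=lock['id'],
        name=lock['name'],
        level=re.sub(r'(?<!^)(?=[A-Z])', '_', lock['properties']['level']).lower(),
        managed_resource_id=re.sub('/providers/Microsoft.Authorization/locks/.+', '', lock['id']),
    )


sample = {
    'id': '/subscriptions/xxx/resourceGroups/myResourceGroup/providers/Microsoft.Authorization/locks/myLock',
    'name': 'myLock',
    'properties': {'level': 'CanNotDelete'},
}
# lock_to_dict(sample)['level'] == 'can_not_delete'
# lock_to_dict(sample)['managed_resource_id'] == '/subscriptions/xxx/resourceGroups/myResourceGroup'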
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_loganalyticsworkspace.py b/lib/ansible/modules/cloud/azure/azure_rm_loganalyticsworkspace.py
deleted file mode 100644
index c6f41eb6b1..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_loganalyticsworkspace.py
+++ /dev/null
@@ -1,321 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_loganalyticsworkspace
-version_added: "2.8"
-short_description: Manage Azure Log Analytics workspaces
-description:
- - Create, delete Azure Log Analytics workspaces.
-options:
- resource_group:
- description:
- - Name of resource group.
- required: true
- name:
- description:
- - Name of the workspace.
- required: true
- state:
- description:
- - Assert the state of the workspace. Use C(present) to create or update a workspace and C(absent) to delete a workspace.
- default: present
- choices:
- - absent
- - present
- location:
- description:
- - Resource location.
- sku:
- description:
- - The SKU of the workspace.
- choices:
- - free
- - standard
- - premium
- - unlimited
- - per_node
- - per_gb2018
- - standalone
- default: per_gb2018
- retention_in_days:
- description:
- - The workspace data retention in days.
- - -1 means Unlimited retention for I(sku=unlimited).
- - 730 days is the maximum allowed for all other SKUs.
- intelligence_packs:
- description:
- - Manage intelligence packs possible for this workspace.
- - Enable one pack by setting it to C(true). For example "Backup:true".
- - Disable one pack by setting it to C(false). For example "Backup:false".
- - Other intelligence packs not listed in this property will not be changed.
- type: dict
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yuwei Zhou (@yuwzho)
-'''
-
-EXAMPLES = '''
-- name: Create a workspace with backup enabled
- azure_rm_loganalyticsworkspace:
- resource_group: myResourceGroup
- name: myLogAnalyticsWorkspace
- intelligence_packs:
- Backup: true
-'''
-
-RETURN = '''
-id:
- description:
- - Workspace resource path.
- type: str
- returned: success
- example: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.OperationalInsights/workspaces/myLogAnalyticsWorkspace"
-location:
- description:
- - Resource location.
- type: str
- returned: success
- example: eastus
-sku:
- description:
- - The SKU of the workspace.
- type: str
- returned: success
- example: "per_gb2018"
-retention_in_days:
- description:
- - The workspace data retention in days.
- - -1 means Unlimited retention for I(sku=unlimited).
- - 730 days is the maximum allowed for all other SKUs.
- type: int
- returned: success
- example: 40
-intelligence_packs:
- description:
- - Lists all the intelligence packs possible and whether they are enabled or disabled for a given workspace.
- type: list
- returned: success
- example: [{'name': 'CapacityPerformance', 'enabled': true}]
-management_groups:
- description:
- - Management groups connected to the workspace.
- type: dict
- returned: success
- example: {'value': []}
-shared_keys:
- description:
- - Shared keys for the workspace.
- type: dict
- returned: success
- example: {
- 'primarySharedKey': 'BozLY1JnZbxu0jWUQSY8iRPEM8ObmpP8rW+8bUl3+HpDJI+n689SxXgTgU7k1qdxo/WugRLxechxbolAfHM5uA==',
- 'secondarySharedKey': '7tDt5W0JBrCQKtQA3igfFltLSzJeyr9LmuT+B/ibzd8cdC1neZ1ePOQLBx5NUzc0q2VUIK0cLhWNyFvo/hT8Ww=='
- }
-usages:
- description:
- - Usage metrics for the workspace.
- type: dict
- returned: success
- example: {
- 'value': [
- {
- 'name': {
- 'value': 'DataAnalyzed',
- 'localizedValue': 'Data Analyzed'
- },
- 'unit': 'Bytes',
- 'currentValue': 0,
- 'limit': 524288000,
- 'nextResetTime': '2017-10-03T00:00:00Z',
- 'quotaPeriod': 'P1D'
- }
- ]
- }
-''' # NOQA
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
-from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
-
-try:
- from msrestazure.tools import parse_resource_id
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMLogAnalyticsWorkspace(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- location=dict(type='str'),
- sku=dict(type='str', default='per_gb2018', choices=['free', 'standard', 'premium', 'unlimited', 'per_node', 'per_gb2018', 'standalone']),
- retention_in_days=dict(type='int'),
- intelligence_packs=dict(type='dict')
- )
-
- self.results = dict(
- changed=False,
- id=None
- )
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.location = None
- self.sku = None
- self.retention_in_days = None
- self.intelligence_packs = None
-
- super(AzureRMLogAnalyticsWorkspace, self).__init__(self.module_arg_spec, supports_check_mode=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- self.results = dict()
- changed = False
-
- if not self.location:
- resource_group = self.get_resource_group(self.resource_group)
- self.location = resource_group.location
-
- if self.sku == 'per_gb2018':
- self.sku = 'PerGB2018'
- else:
- self.sku = _snake_to_camel(self.sku)
- workspace = self.get_workspace()
- if not workspace and self.state == 'present':
- changed = True
- workspace = self.log_analytics_models.Workspace(sku=self.log_analytics_models.Sku(name=self.sku),
- retention_in_days=self.retention_in_days,
- location=self.location)
- if not self.check_mode:
- workspace = self.create_workspace(workspace)
- elif workspace and self.state == 'absent':
- changed = True
- workspace = None
- if not self.check_mode:
- self.delete_workspace()
- if workspace and workspace.id:
- self.results = self.to_dict(workspace)
- self.results['intelligence_packs'] = self.list_intelligence_packs()
- self.results['management_groups'] = self.list_management_groups()
- self.results['usages'] = self.list_usages()
- self.results['shared_keys'] = self.get_shared_keys()
- # handle the intelligence pack
- if workspace and workspace.id and self.intelligence_packs:
- intelligence_packs = self.results['intelligence_packs']
- for key in self.intelligence_packs.keys():
- enabled = self.intelligence_packs[key]
- for x in intelligence_packs:
- if x['name'].lower() == key.lower():
- if x['enabled'] != enabled:
- changed = True
- if not self.check_mode:
- self.change_intelligence(x['name'], enabled)
- x['enabled'] = enabled
- break
- self.results['changed'] = changed
- return self.results
-
- def create_workspace(self, workspace):
- try:
- poller = self.log_analytics_client.workspaces.create_or_update(self.resource_group, self.name, workspace)
- return self.get_poller_result(poller)
- except CloudError as exc:
- self.fail('Error when creating workspace {0} - {1}'.format(self.name, exc.message or str(exc)))
-
- def get_workspace(self):
- try:
- return self.log_analytics_client.workspaces.get(self.resource_group, self.name)
- except CloudError:
- pass
-
- def delete_workspace(self):
- try:
- self.log_analytics_client.workspaces.delete(self.resource_group, self.name)
- except CloudError as exc:
- self.fail('Error when deleting workspace {0} - {1}'.format(self.name, exc.message or str(exc)))
-
- def to_dict(self, workspace):
- result = workspace.as_dict()
- result['sku'] = _camel_to_snake(workspace.sku.name)
- return result
-
- def list_intelligence_packs(self):
- try:
- response = self.log_analytics_client.workspaces.list_intelligence_packs(self.resource_group, self.name)
- return [x.as_dict() for x in response]
- except CloudError as exc:
- self.fail('Error when listing intelligence packs {0}'.format(exc.message or str(exc)))
-
- def change_intelligence(self, key, value):
- try:
- if value:
- self.log_analytics_client.workspaces.enable_intelligence_pack(self.resource_group, self.name, key)
- else:
- self.log_analytics_client.workspaces.disable_intelligence_pack(self.resource_group, self.name, key)
- except CloudError as exc:
- self.fail('Error when changing intelligence pack {0} - {1}'.format(key, exc.message or str(exc)))
-
- def list_management_groups(self):
- result = []
- try:
- response = self.log_analytics_client.workspaces.list_management_groups(self.resource_group, self.name)
- while True:
- result.append(response.next().as_dict())
- except StopIteration:
- pass
- except CloudError as exc:
- self.fail('Error when listing management groups {0}'.format(exc.message or str(exc)))
- return result
-
- def list_usages(self):
- result = []
- try:
- response = self.log_analytics_client.workspaces.list_usages(self.resource_group, self.name)
- while True:
- result.append(response.next().as_dict())
- except StopIteration:
- pass
- except CloudError as exc:
- self.fail('Error when listing usages {0}'.format(exc.message or str(exc)))
- return result
-
- def get_shared_keys(self):
- try:
- return self.log_analytics_client.workspaces.get_shared_keys(self.resource_group, self.name).as_dict()
- except CloudError as exc:
- self.fail('Error when getting shared key {0}'.format(exc.message or str(exc)))
-
-
-def main():
- AzureRMLogAnalyticsWorkspace()
-
-
-if __name__ == '__main__':
- main()
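A hedged, standalone sketch of the intelligence-pack reconciliation loop above: only packs named in the requested dict are touched, a change is reported when the enabled flag differs, and the apply callable stands in for the enable/disable API calls (and for the check_mode guard):

def reconcile_intelligence_packs(requested, current, apply):
    """Bring the 'enabled' flag of the requested packs in line with the request."""
    changed = False
    for name, enabled in (requested or {}).items():
        for pack in current:
            if pack['name'].lower() == name.lower():
                if pack['enabled'] != enabled:
                    changed = True
                    apply(pack['name'], enabled)
                    pack['enabled'] = enabled
                break
    return changed


# e.g. reconcile_intelligence_packs({'Backup': True},
#                                   [{'name': 'Backup', 'enabled': False}],
#                                   lambda name, enabled: None)  -> True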
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_loganalyticsworkspace_info.py b/lib/ansible/modules/cloud/azure/azure_rm_loganalyticsworkspace_info.py
deleted file mode 100644
index 03f841eedd..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_loganalyticsworkspace_info.py
+++ /dev/null
@@ -1,269 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_loganalyticsworkspace_info
-version_added: "2.9"
-short_description: Get facts of Azure Log Analytics workspaces
-description:
- - Get, query Azure Log Analytics workspaces.
-options:
- resource_group:
- description:
- - Name of resource group.
- required: True
- name:
- description:
- - Name of the workspace.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- show_intelligence_packs:
- description:
- - Show the intelligence packs for a workspace.
- - Note this requires one extra API request per workspace, so responses can be slow.
- show_management_groups:
- description:
- - Show the management groups for a workspace.
- - Note this requires one extra API request per workspace, so responses can be slow.
- show_shared_keys:
- description:
- - Show the shared keys for a workspace.
- - Note this requires one extra API request per workspace, so responses can be slow.
- show_usages:
- description:
- - Show the list of usages for a workspace.
- - Note this requires one extra API request per workspace, so responses can be slow.
-extends_documentation_fragment:
- - azure
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
-- name: Query a workspace
- azure_rm_loganalyticsworkspace_info:
- resource_group: myResourceGroup
- name: myLogAnalyticsWorkspace
- show_intelligence_packs: true
- show_management_groups: true
- show_shared_keys: true
- show_usages: true
-'''
-
-RETURN = '''
-id:
- description:
- - Workspace resource path.
- type: str
- returned: success
- example: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.OperationalInsights/workspaces/myLogAnalyticsWorkspace"
-location:
- description:
- - Resource location.
- type: str
- returned: success
- example: "eastus"
-sku:
- description:
- - The SKU of the workspace.
- type: str
- returned: success
- example: "per_gb2018"
-retention_in_days:
- description:
- - The workspace data retention in days.
- - -1 means Unlimited retention for I(sku=unlimited).
- - 730 days is the maximum allowed for all other SKUs.
- type: int
- returned: success
- example: 40
-intelligence_packs:
- description:
- - Lists all the intelligence packs possible and whether they are enabled or disabled for a given workspace.
- type: list
- returned: success
- example: [ {'name': 'CapacityPerformance', 'enabled': true} ]
-management_groups:
- description:
- - Management groups connected to the workspace.
- type: dict
- returned: success
- example: {'value': []}
-shared_keys:
- description:
- - Shared keys for the workspace.
- type: dict
- returned: success
- example: {
- 'primarySharedKey': 'BozLY1JnZbxu0jWUQSY8iRPEM8ObmpP8rW+8bUl3+HpDJI+n689SxXgTgU7k1qdxo/WugRLxechxbolAfHM5uA==',
- 'secondarySharedKey': '7tDt5W0JBrCQKtQA3igfFltLSzJeyr9LmuT+B/ibzd8cdC1neZ1ePOQLBx5NUzc0q2VUIK0cLhWNyFvo/hT8Ww=='
- }
-usages:
- description:
- - Usage metrics for the workspace.
- type: dict
- returned: success
- example: {
- 'value': [
- {
- 'name': {
- 'value': 'DataAnalyzed',
- 'localizedValue': 'Data Analyzed'
- },
- 'unit': 'Bytes',
- 'currentValue': 0,
- 'limit': 524288000,
- 'nextResetTime': '2017-10-03T00:00:00Z',
- 'quotaPeriod': 'P1D'
- }
- ]
- }
-''' # NOQA
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
-from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
-
-try:
- from msrestazure.tools import parse_resource_id
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMLogAnalyticsWorkspaceInfo(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str'),
- tags=dict(type='list'),
- show_shared_keys=dict(type='bool'),
- show_intelligence_packs=dict(type='bool'),
- show_usages=dict(type='bool'),
- show_management_groups=dict(type='bool')
- )
-
- self.results = dict(
- changed=False,
- workspaces=[]
- )
-
- self.resource_group = None
- self.name = None
- self.tags = None
- self.show_intelligence_packs = None
- self.show_shared_keys = None
- self.show_usages = None
- self.show_management_groups = None
-
- super(AzureRMLogAnalyticsWorkspaceInfo, self).__init__(self.module_arg_spec, supports_tags=False, facts_module=True)
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_loganalyticsworkspace_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_loganalyticsworkspace_facts' module has been renamed to 'azure_rm_loganalyticsworkspace_info'",
- version='2.13')
-
- for key in list(self.module_arg_spec.keys()):
- setattr(self, key, kwargs[key])
-
- if self.name:
- item = self.get_workspace()
- response = [item] if item else []
- else:
- response = self.list_by_resource_group()
-
- self.results['workspaces'] = [self.to_dict(x) for x in response if self.has_tags(x.tags, self.tags)]
- return self.results
-
- def get_workspace(self):
- try:
- return self.log_analytics_client.workspaces.get(self.resource_group, self.name)
- except CloudError:
- pass
- return None
-
- def list_by_resource_group(self):
- try:
- return self.log_analytics_client.workspaces.list_by_resource_group(self.resource_group)
- except CloudError:
- pass
- return []
-
- def list_intelligence_packs(self):
- try:
- response = self.log_analytics_client.workspaces.list_intelligence_packs(self.resource_group, self.name)
- return [x.as_dict() for x in response]
- except CloudError as exc:
- self.fail('Error when listing intelligence packs {0}'.format(exc.message or str(exc)))
-
- def list_management_groups(self):
- result = []
- try:
- response = self.log_analytics_client.workspaces.list_management_groups(self.resource_group, self.name)
- while True:
- result.append(response.next().as_dict())
- except StopIteration:
- pass
- except CloudError as exc:
- self.fail('Error when listing management groups {0}'.format(exc.message or str(exc)))
- return result
-
- def list_usages(self):
- result = []
- try:
- response = self.log_analytics_client.workspaces.list_usages(self.resource_group, self.name)
- while True:
- result.append(response.next().as_dict())
- except StopIteration:
- pass
- except CloudError as exc:
- self.fail('Error when listing usages {0}'.format(exc.message or str(exc)))
- return result
-
- def get_shared_keys(self):
- try:
- return self.log_analytics_client.workspaces.get_shared_keys(self.resource_group, self.name).as_dict()
- except CloudError as exc:
- self.fail('Error when getting shared key {0}'.format(exc.message or str(exc)))
-
- def to_dict(self, workspace):
- result = workspace.as_dict()
- result['sku'] = _camel_to_snake(workspace.sku.name)
- if self.show_intelligence_packs:
- result['intelligence_packs'] = self.list_intelligence_packs()
- if self.show_management_groups:
- result['management_groups'] = self.list_management_groups()
- if self.show_shared_keys:
- result['shared_keys'] = self.get_shared_keys()
- if self.show_usages:
- result['usages'] = self.list_usages()
- return result
-
-
-def main():
- AzureRMLogAnalyticsWorkspaceInfo()
-
-
-if __name__ == '__main__':
- main()
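A small sketch of the paging pattern used by list_management_groups() and list_usages() above: the SDK hands back a paged iterator, which the module drains item by item until StopIteration (shown here with Python's built-in next() against any iterator):

def drain_paged(paged, to_dict=lambda item: item):
    """Collect every item of a paged iterator into a plain list."""
    result = []
    try:
        while True:
            result.append(to_dict(next(paged)))
    except StopIteration:
        pass
    return result


# e.g. drain_paged(iter([{'name': 'group1'}, {'name': 'group2'}]))
# -> [{'name': 'group1'}, {'name': 'group2'}]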
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_manageddisk.py b/lib/ansible/modules/cloud/azure/azure_rm_manageddisk.py
deleted file mode 100644
index 856cb6d722..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_manageddisk.py
+++ /dev/null
@@ -1,493 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Bruno Medina Bolanos Cacho <bruno.medina@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_manageddisk
-
-version_added: "2.4"
-
-short_description: Manage Azure Managed Disks
-
-description:
- - Create, update and delete an Azure Managed Disk.
-
-notes:
- - This module was called M(azure_rm_managed_disk) before Ansible 2.8. The usage did not change.
-
-options:
- resource_group:
- description:
- - Name of a resource group where the managed disk exists or will be created.
- required: true
- name:
- description:
- - Name of the managed disk.
- required: true
- state:
- description:
- - Assert the state of the managed disk. Use C(present) to create or update a managed disk and C(absent) to delete a managed disk.
- default: present
- choices:
- - absent
- - present
- location:
- description:
- - Valid Azure location. Defaults to location of the resource group.
- storage_account_type:
- description:
- - Type of storage for the managed disk.
- - If not specified, the disk is created as C(Standard_LRS).
- - C(Standard_LRS) is for Standard HDD.
- - C(StandardSSD_LRS) (added in 2.8) is for Standard SSD.
- - C(Premium_LRS) is for Premium SSD.
- - C(UltraSSD_LRS) (added in 2.8) is for Ultra SSD, which is in preview mode, and only available on select instance types.
- - See U(https://docs.microsoft.com/en-us/azure/virtual-machines/windows/disks-types) for more information about disk types.
- choices:
- - Standard_LRS
- - StandardSSD_LRS
- - Premium_LRS
- - UltraSSD_LRS
- create_option:
- description:
- - C(empty) creates an empty disk, C(import) imports from the VHD file given in I(source_uri), and C(copy) copies an existing managed disk referenced by I(source_uri).
- choices:
- - empty
- - import
- - copy
- source_uri:
- description:
- - URI to a valid VHD file to be used or the resource ID of the managed disk to copy.
- aliases:
- - source_resource_uri
- os_type:
- description:
- - Type of Operating System.
- - Used when I(create_option=copy) or I(create_option=import) and the source is an OS disk.
- - If omitted during creation, no value is set.
- - If omitted during an update, no change is made.
- - Once set, this value cannot be cleared.
- choices:
- - linux
- - windows
- disk_size_gb:
- description:
- - Size in GB of the managed disk to be created.
- - If I(create_option=copy) then the value must be greater than or equal to the source's size.
- managed_by:
- description:
- - Name of an existing virtual machine with which the disk is or will be associated; this VM should be in the same resource group.
- - To detach a disk from a VM, explicitly set this to ''.
- - If this option is unset, the value will not be changed.
- version_added: '2.5'
- attach_caching:
- description:
- - Disk caching policy controlled by the VM. Used when the disk is attached to the VM defined by C(managed_by).
- - If this option differs from the current caching policy, the managed disk will be detached and re-attached with the requested caching option.
- choices:
- - ''
- - read_only
- - read_write
- version_added: '2.8'
- tags:
- description:
- - Tags to assign to the managed disk.
- - Format tags as 'key' or 'key:value'.
- zone:
- description:
- - The Azure managed disk's zone.
- - Allowed values are C(1), C(2), C(3) and C('').
- choices:
- - 1
- - 2
- - 3
- - ''
- version_added: "2.8"
- lun:
- description:
- - The logical unit number for data disk.
- - This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM.
- type: int
- version_added: '2.10'
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-author:
- - Bruno Medina (@brusMX)
-'''
-
-EXAMPLES = '''
- - name: Create managed disk
- azure_rm_manageddisk:
- name: mymanageddisk
- location: eastus
- resource_group: myResourceGroup
- disk_size_gb: 4
-
- - name: Create managed operating system disk from page blob
- azure_rm_manageddisk:
- name: mymanageddisk
- location: eastus2
- resource_group: myResourceGroup
- create_option: import
- source_uri: https://storageaccountname.blob.core.windows.net/containername/blob-name.vhd
- os_type: windows
- storage_account_type: Premium_LRS
-
- - name: Mount the managed disk to VM
- azure_rm_manageddisk:
- name: mymanageddisk
- location: eastus
- resource_group: myResourceGroup
- disk_size_gb: 4
- managed_by: testvm001
- attach_caching: read_only
-
- - name: Unmount the managed disk to VM
- azure_rm_manageddisk:
- name: mymanageddisk
- location: eastus
- resource_group: myResourceGroup
- disk_size_gb: 4
-
- - name: Delete managed disk
- azure_rm_manageddisk:
- name: mymanageddisk
- location: eastus
- resource_group: myResourceGroup
- state: absent
-'''
-
-RETURN = '''
-id:
- description:
- - The managed disk resource ID.
- returned: always
- type: dict
-state:
- description:
- - Current state of the managed disk.
- returned: always
- type: dict
-changed:
- description:
- - Whether or not the resource has changed.
- returned: always
- type: bool
-'''
-
-import re
-
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-try:
- from msrestazure.tools import parse_resource_id
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-# duplicated in azure_rm_manageddisk_facts
-def managed_disk_to_dict(managed_disk):
- create_data = managed_disk.creation_data
- return dict(
- id=managed_disk.id,
- name=managed_disk.name,
- location=managed_disk.location,
- tags=managed_disk.tags,
- create_option=create_data.create_option.lower(),
- source_uri=create_data.source_uri or create_data.source_resource_id,
- disk_size_gb=managed_disk.disk_size_gb,
- os_type=managed_disk.os_type.lower() if managed_disk.os_type else None,
- storage_account_type=managed_disk.sku.name if managed_disk.sku else None,
- managed_by=managed_disk.managed_by,
- zone=managed_disk.zones[0] if managed_disk.zones and len(managed_disk.zones) > 0 else ''
- )
-
-
-class AzureRMManagedDisk(AzureRMModuleBase):
- """Configuration class for an Azure RM Managed Disk resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- ),
- location=dict(
- type='str'
- ),
- storage_account_type=dict(
- type='str',
- choices=['Standard_LRS', 'StandardSSD_LRS', 'Premium_LRS', 'UltraSSD_LRS']
- ),
- create_option=dict(
- type='str',
- choices=['empty', 'import', 'copy']
- ),
- source_uri=dict(
- type='str',
- aliases=['source_resource_uri']
- ),
- os_type=dict(
- type='str',
- choices=['linux', 'windows']
- ),
- disk_size_gb=dict(
- type='int'
- ),
- managed_by=dict(
- type='str'
- ),
- zone=dict(
- type='str',
- choices=['', '1', '2', '3']
- ),
- attach_caching=dict(
- type='str',
- choices=['', 'read_only', 'read_write']
- ),
- lun=dict(
- type='int'
- )
- )
- required_if = [
- ('create_option', 'import', ['source_uri']),
- ('create_option', 'copy', ['source_uri']),
- ('create_option', 'empty', ['disk_size_gb'])
- ]
- self.results = dict(
- changed=False,
- state=dict())
-
- self.resource_group = None
- self.name = None
- self.location = None
- self.storage_account_type = None
- self.create_option = None
- self.source_uri = None
- self.os_type = None
- self.disk_size_gb = None
- self.tags = None
- self.zone = None
- self.managed_by = None
- self.attach_caching = None
- self.lun = None
- super(AzureRMManagedDisk, self).__init__(
- derived_arg_spec=self.module_arg_spec,
- required_if=required_if,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- result = None
- changed = False
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- disk_instance = self.get_managed_disk()
- result = disk_instance
-
- # need create or update
- if self.state == 'present':
- parameter = self.generate_managed_disk_property()
- if not disk_instance or self.is_different(disk_instance, parameter):
- changed = True
- if not self.check_mode:
- result = self.create_or_update_managed_disk(parameter)
- else:
- result = True
-
- # unmount from the old virtual machine and mount to the new virtual machine
- if self.managed_by is not None:
- vm_name = parse_resource_id(disk_instance.get('managed_by', '')).get('name') if disk_instance else None
- vm_name = vm_name or ''
- if self.managed_by != vm_name or self.is_attach_caching_option_different(vm_name, result):
- changed = True
- if not self.check_mode:
- if vm_name:
- self.detach(vm_name, result)
- if self.managed_by:
- self.attach(self.managed_by, result)
- result = self.get_managed_disk()
-
- if self.state == 'absent' and disk_instance:
- changed = True
- if not self.check_mode:
- self.delete_managed_disk()
- result = True
-
- self.results['changed'] = changed
- self.results['state'] = result
- return self.results
-
- def attach(self, vm_name, disk):
- vm = self._get_vm(vm_name)
- # find the lun
- if self.lun is not None:
- lun = self.lun
- else:
- luns = ([d.lun for d in vm.storage_profile.data_disks]
- if vm.storage_profile.data_disks else [])
- lun = max(luns) + 1 if luns else 0
-
- # prepare the data disk
- params = self.compute_models.ManagedDiskParameters(id=disk.get('id'), storage_account_type=disk.get('storage_account_type'))
- caching_options = self.compute_models.CachingTypes[self.attach_caching] if self.attach_caching and self.attach_caching != '' else None
- data_disk = self.compute_models.DataDisk(lun=lun,
- create_option=self.compute_models.DiskCreateOptionTypes.attach,
- managed_disk=params,
- caching=caching_options)
- vm.storage_profile.data_disks.append(data_disk)
- self._update_vm(vm_name, vm)
-
- def detach(self, vm_name, disk):
- vm = self._get_vm(vm_name)
- leftovers = [d for d in vm.storage_profile.data_disks if d.name.lower() != disk.get('name').lower()]
- if len(vm.storage_profile.data_disks) == len(leftovers):
- self.fail("No disk with the name '{0}' was found".format(disk.get('name')))
- vm.storage_profile.data_disks = leftovers
- self._update_vm(vm_name, vm)
-
- def _update_vm(self, name, params):
- try:
- poller = self.compute_client.virtual_machines.create_or_update(self.resource_group, name, params)
- self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error updating virtual machine {0} - {1}".format(name, str(exc)))
-
- def _get_vm(self, name):
- try:
- return self.compute_client.virtual_machines.get(self.resource_group, name, expand='instanceview')
- except Exception as exc:
- self.fail("Error getting virtual machine {0} - {1}".format(name, str(exc)))
-
- def generate_managed_disk_property(self):
- # TODO: Add support for EncryptionSettings, DiskIOPSReadWrite, DiskMBpsReadWrite
- disk_params = {}
- creation_data = {}
- disk_params['location'] = self.location
- disk_params['tags'] = self.tags
- if self.zone:
- disk_params['zones'] = [self.zone]
- if self.storage_account_type:
- storage_account_type = self.compute_models.DiskSku(name=self.storage_account_type)
- disk_params['sku'] = storage_account_type
- disk_params['disk_size_gb'] = self.disk_size_gb
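- # default to an empty disk; the 'import' and 'copy' branches below override create_option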
- creation_data['create_option'] = self.compute_models.DiskCreateOption.empty
- if self.create_option == 'import':
- creation_data['create_option'] = self.compute_models.DiskCreateOption.import_enum
- creation_data['source_uri'] = self.source_uri
- elif self.create_option == 'copy':
- creation_data['create_option'] = self.compute_models.DiskCreateOption.copy
- creation_data['source_resource_id'] = self.source_uri
- if self.os_type:
- typecon = {
- 'linux': self.compute_models.OperatingSystemTypes.linux,
- 'windows': self.compute_models.OperatingSystemTypes.windows
- }
- disk_params['os_type'] = typecon[self.os_type]
- else:
- disk_params['os_type'] = None
- disk_params['creation_data'] = creation_data
- return disk_params
-
- def create_or_update_managed_disk(self, parameter):
- try:
- poller = self.compute_client.disks.create_or_update(
- self.resource_group,
- self.name,
- parameter)
- aux = self.get_poller_result(poller)
- return managed_disk_to_dict(aux)
- except CloudError as e:
- self.fail("Error creating the managed disk: {0}".format(str(e)))
-
- # This method accounts for the difference in structure between the
- # Azure retrieved disk and the parameters for the new disk to be created.
- def is_different(self, found_disk, new_disk):
- resp = False
- if new_disk.get('disk_size_gb'):
- if found_disk['disk_size_gb'] != new_disk['disk_size_gb']:
- resp = True
- if new_disk.get('os_type'):
- if found_disk['os_type'] != new_disk['os_type']:
- resp = True
- if new_disk.get('sku'):
- if found_disk['storage_account_type'] != new_disk['sku'].name:
- resp = True
- # Check how to implement tags
- if new_disk.get('tags') is not None:
- if found_disk['tags'] != new_disk['tags']:
- resp = True
- if self.zone is not None:
- if found_disk['zone'] != self.zone:
- resp = True
- return resp
-
- def delete_managed_disk(self):
- try:
- poller = self.compute_client.disks.delete(
- self.resource_group,
- self.name)
- return self.get_poller_result(poller)
- except CloudError as e:
- self.fail("Error deleting the managed disk: {0}".format(str(e)))
-
- def get_managed_disk(self):
- try:
- resp = self.compute_client.disks.get(
- self.resource_group,
- self.name)
- return managed_disk_to_dict(resp)
- except CloudError as e:
- self.log('Did not find managed disk')
-
- def is_attach_caching_option_different(self, vm_name, disk):
- resp = False
- if vm_name:
- vm = self._get_vm(vm_name)
- correspondence = next((d for d in vm.storage_profile.data_disks if d.name.lower() == disk.get('name').lower()), None)
- if correspondence and correspondence.caching.name != self.attach_caching:
- resp = True
- if correspondence.caching.name == 'none' and self.attach_caching == '':
- resp = False
- return resp
-
-
-def main():
- """Main execution"""
- AzureRMManagedDisk()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_manageddisk_info.py b/lib/ansible/modules/cloud/azure/azure_rm_manageddisk_info.py
deleted file mode 100644
index cc90cb9dd5..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_manageddisk_info.py
+++ /dev/null
@@ -1,243 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Bruno Medina Bolanos Cacho <bruno.medina@microsoft.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = r'''
----
-module: azure_rm_manageddisk_info
-
-version_added: "2.9"
-
-short_description: Get managed disk facts
-
-description:
- - Get facts for a specific managed disk or all managed disks.
-
-notes:
- - This module was called M(azure_rm_managed_disk_facts) before Ansible 2.8. The usage did not change.
-
-options:
- name:
- description:
- - Limit results to a specific managed disk.
- type: str
- resource_group:
- description:
- - Limit results to a specific resource group.
- type: str
- tags:
- description:
- - Limit results by providing a list of tags.
- - Format tags as 'key' or 'key:value'.
- type: list
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Bruno Medina (@brusMX)
-'''
-
-EXAMPLES = r'''
-- name: Get facts for one managed disk
- azure_rm_manageddisk_info:
- name: Testing
- resource_group: myResourceGroup
-
-- name: Get facts for all managed disks
- azure_rm_manageddisk_info:
-
-- name: Get facts by tags
- azure_rm_manageddisk_info:
- tags:
- - testing
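-
-# Illustrative example (hedged): tags may also be given in 'key:value' form, per the tags option docs.
-- name: Get facts by tag key and value
- azure_rm_manageddisk_info:
- tags:
- - testing:testvalue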
-'''
-
-RETURN = r'''
-azure_managed_disk:
- description:
- - List of managed disk dicts.
- returned: always
- type: list
- contains:
- id:
- description:
- - Resource id.
- type: str
- name:
- description:
- - Name of the managed disk.
- type: str
- location:
- description:
- - Valid Azure location.
- type: str
- storage_account_type:
- description:
- - Type of storage for the managed disk.
- - See U(https://docs.microsoft.com/en-us/azure/virtual-machines/windows/disks-types) for more information about this type.
- type: str
- sample: Standard_LRS
- create_option:
- description:
- - Create option of the disk.
- type: str
- sample: copy
- source_uri:
- description:
- - URI to a valid VHD file to be used or the resource ID of the managed disk to copy.
- type: str
- os_type:
- description:
- - Type of Operating System.
- choices:
- - linux
- - windows
- type: str
- disk_size_gb:
- description:
- - Size in GB of the managed disk.
- type: str
- managed_by:
- description:
- - Name of an existing virtual machine with which the disk is or will be associated; this VM should be in the same resource group.
- type: str
- tags:
- description:
- - Tags to assign to the managed disk.
- type: dict
- sample: { "tag": "value" }
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except Exception:
- # handled in azure_rm_common
- pass
-
-
-# duplicated in azure_rm_manageddisk
-def managed_disk_to_dict(managed_disk):
- create_data = managed_disk.creation_data
- return dict(
- id=managed_disk.id,
- name=managed_disk.name,
- location=managed_disk.location,
- tags=managed_disk.tags,
- create_option=create_data.create_option.lower(),
- source_uri=create_data.source_uri or create_data.source_resource_id,
- disk_size_gb=managed_disk.disk_size_gb,
- os_type=managed_disk.os_type.lower() if managed_disk.os_type else None,
- storage_account_type=managed_disk.sku.name if managed_disk.sku else None,
- managed_by=managed_disk.managed_by,
- zone=managed_disk.zones[0] if managed_disk.zones and len(managed_disk.zones) > 0 else ''
- )
-
-
-class AzureRMManagedDiskInfo(AzureRMModuleBase):
- """Utility class to get managed disk facts"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str'
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- ),
- )
- self.results = dict(
- ansible_info=dict(
- azure_managed_disk=[]
- )
- )
- self.resource_group = None
- self.name = None
- self.create_option = None
- self.source_uri = None
- self.source_resource_uri = None
- self.tags = None
- super(AzureRMManagedDiskInfo, self).__init__(
- derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- self.results['ansible_info']['azure_managed_disk'] = (
- self.get_item() if self.name
- else (self.list_items_by_resource_group() if self.resource_group else self.list_items())
- )
-
- return self.results
-
- def get_item(self):
- """Get a single managed disk"""
- item = None
- result = []
-
- try:
- item = self.compute_client.disks.get(
- self.resource_group,
- self.name)
- except CloudError:
- pass
-
- if item and self.has_tags(item.tags, self.tags):
- result = [managed_disk_to_dict(item)]
-
- return result
-
- def list_items(self):
- """Get all managed disks"""
- try:
- response = self.compute_client.disks.list()
- except CloudError as exc:
- self.fail('Failed to list all items - {0}'.format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(managed_disk_to_dict(item))
- return results
-
- def list_items_by_resource_group(self):
- """Get managed disks in a resource group"""
- try:
- response = self.compute_client.disks.list_by_resource_group(resource_group_name=self.resource_group)
- except CloudError as exc:
- self.fail('Failed to list items by resource group - {0}'.format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(managed_disk_to_dict(item))
- return results
-
-
-def main():
- """Main module execution code path"""
-
- AzureRMManagedDiskInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_mariadbconfiguration.py b/lib/ansible/modules/cloud/azure/azure_rm_mariadbconfiguration.py
deleted file mode 100644
index 212cf7959d..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_mariadbconfiguration.py
+++ /dev/null
@@ -1,241 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_mariadbconfiguration
-version_added: "2.8"
-short_description: Manage Configuration instance
-description:
- - Create, update and delete instance of Configuration.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource.
- required: True
- server_name:
- description:
- - The name of the server.
- required: True
- name:
- description:
- - The name of the server configuration.
- required: True
- value:
- description:
- - Value of the configuration.
- state:
- description:
- - Assert the state of the MariaDB configuration. Use C(present) to update setting, or C(absent) to reset to default value.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
- - Matti Ranta (@techknowlogick)
-'''
-
-EXAMPLES = '''
- - name: Update MariaDB Server setting
- azure_rm_mariadbconfiguration:
- resource_group: myResourceGroup
- server_name: myServer
- name: event_scheduler
- value: "ON"
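-
- # Illustrative example (hedged): reset a setting to its default value, as described by the state option.
- - name: Reset MariaDB Server setting to its default value
- azure_rm_mariadbconfiguration:
- resource_group: myResourceGroup
- server_name: myServer
- name: event_scheduler
- state: absent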
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/myServer/confi
- gurations/event_scheduler"
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMMariaDbConfiguration(AzureRMModuleBase):
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- value=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.server_name = None
- self.name = None
- self.value = None
-
- self.results = dict(changed=False)
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMMariaDbConfiguration, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
-
- old_response = None
- response = None
-
- old_response = self.get_configuration()
-
- if not old_response:
- self.log("Configuration instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("Configuration instance already exists")
- if self.state == 'absent' and old_response['source'] == 'user-override':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.log("Need to check if Configuration instance has to be deleted or may be updated")
- if self.value != old_response.get('value'):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the Configuration instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_configuration()
-
- self.results['changed'] = True
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("Configuration instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_configuration()
- else:
- self.log("Configuration instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
-
- return self.results
-
- def create_update_configuration(self):
- self.log("Creating / Updating the Configuration instance {0}".format(self.name))
-
- try:
- response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group,
- server_name=self.server_name,
- configuration_name=self.name,
- value=self.value,
- source='user-override')
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the Configuration instance.')
- self.fail("Error creating the Configuration instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_configuration(self):
- self.log("Deleting the Configuration instance {0}".format(self.name))
- try:
- response = self.mariadb_client.configurations.create_or_update(resource_group_name=self.resource_group,
- server_name=self.server_name,
- configuration_name=self.name,
- source='system-default')
- except CloudError as e:
- self.log('Error attempting to delete the Configuration instance.')
- self.fail("Error deleting the Configuration instance: {0}".format(str(e)))
-
- return True
-
- def get_configuration(self):
- self.log("Checking if the Configuration instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mariadb_client.configurations.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- configuration_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("Configuration instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the Configuration instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMMariaDbConfiguration()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_mariadbconfiguration_info.py b/lib/ansible/modules/cloud/azure/azure_rm_mariadbconfiguration_info.py
deleted file mode 100644
index ad38f1255f..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_mariadbconfiguration_info.py
+++ /dev/null
@@ -1,216 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_mariadbconfiguration_info
-version_added: "2.9"
-short_description: Get Azure MariaDB Configuration facts
-description:
- - Get facts of Azure MariaDB Configuration.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- type: str
- server_name:
- description:
- - The name of the server.
- required: True
- type: str
- name:
- description:
- - Setting name.
- type: str
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
- - Matti Ranta (@techknowlogick)
-
-'''
-
-EXAMPLES = '''
- - name: Get specific setting of MariaDB Server
- azure_rm_mariadbconfiguration_info:
- resource_group: myResourceGroup
- server_name: testserver
- name: deadlock_timeout
-
- - name: Get all settings of MariaDB Server
- azure_rm_mariadbconfiguration_info:
- resource_group: myResourceGroup
- server_name: server_name
-'''
-
-RETURN = '''
-settings:
- description:
- - A list of dictionaries containing MariaDB Server settings.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Setting resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver
- /configurations/deadlock_timeout"
- name:
- description:
- - Setting name.
- returned: always
- type: str
- sample: deadlock_timeout
- value:
- description:
- - Setting value.
- returned: always
- type: raw
- sample: 1000
- description:
- description:
- - Description of the configuration.
- returned: always
- type: str
- sample: Deadlock timeout.
- source:
- description:
- - Source of the configuration.
- returned: always
- type: str
- sample: system-default
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.azure_operation import AzureOperationPoller
- from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMMariaDbConfigurationInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
- # store the results of the module operation
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.resource_group = None
- self.server_name = None
- self.name = None
- super(AzureRMMariaDbConfigurationInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_mariadbconfiguration_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_mariadbconfiguration_facts' module has been renamed to 'azure_rm_mariadbconfiguration_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if self.name is not None:
- self.results['settings'] = self.get()
- else:
- self.results['settings'] = self.list_by_server()
- return self.results
-
- def get(self):
- '''
- Gets facts of the specified MariaDB Configuration.
-
- :return: deserialized MariaDB Configuration instance state dictionary
- '''
- response = None
- results = []
- try:
- response = self.mgmt_client.configurations.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- configuration_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Configurations.')
-
- if response is not None:
- results.append(self.format_item(response))
-
- return results
-
- def list_by_server(self):
- '''
- Lists facts of all configurations of the specified MariaDB server.
-
- :return: list of deserialized MariaDB Configuration instance state dictionaries
- '''
- response = None
- results = []
- try:
- response = self.mgmt_client.configurations.list_by_server(resource_group_name=self.resource_group,
- server_name=self.server_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Configurations.')
-
- if response is not None:
- for item in response:
- results.append(self.format_item(item))
-
- return results
-
- def format_item(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.resource_group,
- 'server_name': self.server_name,
- 'id': d['id'],
- 'name': d['name'],
- 'value': d['value'],
- 'description': d['description'],
- 'source': d['source']
- }
- return d
-
-
-def main():
- AzureRMMariaDbConfigurationInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_mariadbdatabase.py b/lib/ansible/modules/cloud/azure/azure_rm_mariadbdatabase.py
deleted file mode 100644
index 8492b96854..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_mariadbdatabase.py
+++ /dev/null
@@ -1,304 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_mariadbdatabase
-version_added: "2.8"
-short_description: Manage MariaDB Database instance
-description:
- - Create, update and delete instance of MariaDB Database.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- server_name:
- description:
- - The name of the server.
- required: True
- name:
- description:
- - The name of the database.
- required: True
- charset:
- description:
- - The charset of the database. Check MariaDB documentation for possible values.
- - This is only set on creation; use I(force_update) to recreate a database if the values don't match.
- collation:
- description:
- - The collation of the database. Check MariaDB documentation for possible values.
- - This is only set on creation; use I(force_update) to recreate a database if the values don't match.
- force_update:
- description:
- - When set to C(true), will delete and recreate the existing MariaDB database if any of the properties don't match what is set.
- - When set to C(false), no change will occur to the database even if any of the properties do not match.
- type: bool
- default: 'no'
- state:
- description:
- - Assert the state of the MariaDB Database. Use C(present) to create or update a database and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
- - Matti Ranta (@techknowlogick)
-
-'''
-
-EXAMPLES = '''
- - name: Create (or update) MariaDB Database
- azure_rm_mariadbdatabase:
- resource_group: myResourceGroup
- server_name: testserver
- name: db1
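-
- # Illustrative example (hedged): charset and collation values shown are examples; check MariaDB docs for valid values.
- - name: Create MariaDB Database with explicit charset and collation
- azure_rm_mariadbdatabase:
- resource_group: myResourceGroup
- server_name: testserver
- name: db1
- charset: utf8
- collation: utf8_general_ci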
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/databases/db1
-name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: db1
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMMariaDbDatabase(AzureRMModuleBase):
- """Configuration class for an Azure RM MariaDB Database resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- charset=dict(
- type='str'
- ),
- collation=dict(
- type='str'
- ),
- force_update=dict(
- type='bool',
- default=False
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.server_name = None
- self.name = None
- self.force_update = None
- self.parameters = dict()
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMMariaDbDatabase, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- if key == "charset":
- self.parameters["charset"] = kwargs[key]
- elif key == "collation":
- self.parameters["collation"] = kwargs[key]
-
- old_response = None
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- resource_group = self.get_resource_group(self.resource_group)
-
- old_response = self.get_mariadbdatabase()
-
- if not old_response:
- self.log("MariaDB Database instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("MariaDB Database instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.log("Need to check if MariaDB Database instance has to be deleted or may be updated")
- if ('collation' in self.parameters) and (self.parameters['collation'] != old_response['collation']):
- self.to_do = Actions.Update
- if ('charset' in self.parameters) and (self.parameters['charset'] != old_response['charset']):
- self.to_do = Actions.Update
- if self.to_do == Actions.Update:
- if self.force_update:
- if not self.check_mode:
- self.delete_mariadbdatabase()
- else:
- self.fail("Database properties cannot be updated without setting 'force_update' option")
- self.to_do = Actions.NoAction
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the MariaDB Database instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_mariadbdatabase()
- self.results['changed'] = True
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("MariaDB Database instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_mariadbdatabase()
- # make sure the instance is actually deleted; some Azure resources linger
- # for a while after deletion -- this should really be fixed in Azure
- while self.get_mariadbdatabase():
- time.sleep(20)
- else:
- self.log("MariaDB Database instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
- self.results["name"] = response["name"]
-
- return self.results
-
- def create_update_mariadbdatabase(self):
- '''
- Creates or updates MariaDB Database with the specified configuration.
-
- :return: deserialized MariaDB Database instance state dictionary
- '''
- self.log("Creating / Updating the MariaDB Database instance {0}".format(self.name))
-
- try:
- response = self.mgmt_client.databases.create_or_update(resource_group_name=self.resource_group,
- server_name=self.server_name,
- database_name=self.name,
- parameters=self.parameters)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the MariaDB Database instance.')
- self.fail("Error creating the MariaDB Database instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_mariadbdatabase(self):
- '''
- Deletes specified MariaDB Database instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the MariaDB Database instance {0}".format(self.name))
- try:
- response = self.mgmt_client.databases.delete(resource_group_name=self.resource_group,
- server_name=self.server_name,
- database_name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the MariaDB Database instance.')
- self.fail("Error deleting the MariaDB Database instance: {0}".format(str(e)))
-
- return True
-
- def get_mariadbdatabase(self):
- '''
- Gets the properties of the specified MariaDB Database.
-
- :return: deserialized MariaDB Database instance state dictionary
- '''
- self.log("Checking if the MariaDB Database instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mgmt_client.databases.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- database_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("MariaDB Database instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the MariaDB Database instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMMariaDbDatabase()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_mariadbdatabase_info.py b/lib/ansible/modules/cloud/azure/azure_rm_mariadbdatabase_info.py
deleted file mode 100644
index 61e33015b1..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_mariadbdatabase_info.py
+++ /dev/null
@@ -1,211 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_mariadbdatabase_info
-version_added: "2.9"
-short_description: Get Azure MariaDB Database facts
-description:
- - Get facts of MariaDB Database.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- type: str
- server_name:
- description:
- - The name of the server.
- required: True
- type: str
- name:
- description:
- - The name of the database.
- type: str
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
- - Matti Ranta (@techknowlogick)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of MariaDB Database
- azure_rm_mariadbdatabase_info:
- resource_group: myResourceGroup
- server_name: server_name
- name: database_name
-
- - name: List instances of MariaDB Database
- azure_rm_mariadbdatabase_info:
- resource_group: myResourceGroup
- server_name: server_name
-'''
-
-RETURN = '''
-databases:
- description:
- - A list of dictionaries containing facts for MariaDB Databases.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testser
- ver/databases/db1"
- resource_group:
- description:
- - Resource group name.
- returned: always
- type: str
- sample: testrg
- server_name:
- description:
- - Server name.
- returned: always
- type: str
- sample: testserver
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: db1
- charset:
- description:
- - The charset of the database.
- returned: always
- type: str
- sample: UTF8
- collation:
- description:
- - The collation of the database.
- returned: always
- type: str
- sample: English_United States.1252
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMMariaDbDatabaseInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.resource_group = None
- self.server_name = None
- self.name = None
- super(AzureRMMariaDbDatabaseInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_mariadbdatabase_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_mariadbdatabase_facts' module has been renamed to 'azure_rm_mariadbdatabase_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if (self.resource_group is not None and
- self.server_name is not None and
- self.name is not None):
- self.results['databases'] = self.get()
- elif (self.resource_group is not None and
- self.server_name is not None):
- self.results['databases'] = self.list_by_server()
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mariadb_client.databases.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- database_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Databases.')
-
- if response is not None:
- results.append(self.format_item(response))
-
- return results
-
- def list_by_server(self):
- response = None
- results = []
- try:
- response = self.mariadb_client.databases.list_by_server(resource_group_name=self.resource_group,
- server_name=self.server_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e)))
-
- if response is not None:
- for item in response:
- results.append(self.format_item(item))
-
- return results
-
- def format_item(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.resource_group,
- 'server_name': self.server_name,
- 'name': d['name'],
- 'charset': d['charset'],
- 'collation': d['collation']
- }
- return d
-
-
-def main():
- AzureRMMariaDbDatabaseInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_mariadbfirewallrule.py b/lib/ansible/modules/cloud/azure/azure_rm_mariadbfirewallrule.py
deleted file mode 100644
index 1fc8c5e79e..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_mariadbfirewallrule.py
+++ /dev/null
@@ -1,277 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
-# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_mariadbfirewallrule
-version_added: "2.8"
-short_description: Manage MariaDB firewall rule instance
-description:
- - Create, update and delete instance of MariaDB firewall rule.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- server_name:
- description:
- - The name of the server.
- required: True
- name:
- description:
- - The name of the MariaDB firewall rule.
- required: True
- start_ip_address:
- description:
- - The start IP address of the MariaDB firewall rule. Must be IPv4 format.
- end_ip_address:
- description:
- - The end IP address of the MariaDB firewall rule. Must be IPv4 format.
- state:
- description:
- - Assert the state of the MariaDB firewall rule. Use C(present) to create or update a rule and C(absent) to ensure it is not present.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
- - Matti Ranta (@techknowlogick)
-
-'''
-
-EXAMPLES = '''
- - name: Create (or update) MariaDB firewall rule
- azure_rm_mariadbfirewallrule:
- resource_group: myResourceGroup
- server_name: testserver
- name: rule1
- start_ip_address: 10.0.0.17
- end_ip_address: 10.0.0.20
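-
- # Illustrative example (hedged): remove a firewall rule, as described by the state option.
- - name: Delete MariaDB firewall rule
- azure_rm_mariadbfirewallrule:
- resource_group: myResourceGroup
- server_name: testserver
- name: rule1
- state: absent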
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/testserver/fire
- wallRules/rule1"
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMMariaDbFirewallRule(AzureRMModuleBase):
- """Configuration class for an Azure RM MariaDB firewall rule resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- start_ip_address=dict(
- type='str'
- ),
- end_ip_address=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.server_name = None
- self.name = None
- self.start_ip_address = None
- self.end_ip_address = None
-
- self.results = dict(changed=False)
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMMariaDbFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
-
- old_response = None
- response = None
-
- resource_group = self.get_resource_group(self.resource_group)
-
- old_response = self.get_firewallrule()
-
- if not old_response:
- self.log("MariaDB firewall rule instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("MariaDB firewall rule instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.log("Need to check if MariaDB firewall rule instance has to be deleted or may be updated")
- if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']):
- self.to_do = Actions.Update
- if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the MariaDB firewall rule instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_firewallrule()
-
- if not old_response:
- self.results['changed'] = True
- else:
- self.results['changed'] = (old_response != response)
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("MariaDB firewall rule instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_firewallrule()
- # make sure the instance is actually deleted; some Azure resources linger
- # for a while after deletion -- this should really be fixed in Azure
- while self.get_firewallrule():
- time.sleep(20)
- else:
- self.log("MariaDB firewall rule instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
-
- return self.results
-
- def create_update_firewallrule(self):
- '''
- Creates or updates MariaDB firewall rule with the specified configuration.
-
- :return: deserialized MariaDB firewall rule instance state dictionary
- '''
- self.log("Creating / Updating the MariaDB firewall rule instance {0}".format(self.name))
-
- try:
- response = self.mariadb_client.firewall_rules.create_or_update(resource_group_name=self.resource_group,
- server_name=self.server_name,
- firewall_rule_name=self.name,
- start_ip_address=self.start_ip_address,
- end_ip_address=self.end_ip_address)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the MariaDB firewall rule instance.')
- self.fail("Error creating the MariaDB firewall rule instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_firewallrule(self):
- '''
- Deletes specified MariaDB firewall rule instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the MariaDB firewall rule instance {0}".format(self.name))
- try:
- response = self.mariadb_client.firewall_rules.delete(resource_group_name=self.resource_group,
- server_name=self.server_name,
- firewall_rule_name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the MariaDB firewall rule instance.')
- self.fail("Error deleting the MariaDB firewall rule instance: {0}".format(str(e)))
-
- return True
-
- def get_firewallrule(self):
- '''
- Gets the properties of the specified MariaDB firewall rule.
-
- :return: deserialized MariaDB firewall rule instance state dictionary
- '''
- self.log("Checking if the MariaDB firewall rule instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mariadb_client.firewall_rules.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- firewall_rule_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("MariaDB firewall rule instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the MariaDB firewall rule instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMMariaDbFirewallRule()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_mariadbfirewallrule_info.py b/lib/ansible/modules/cloud/azure/azure_rm_mariadbfirewallrule_info.py
deleted file mode 100644
index 45557b5113..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_mariadbfirewallrule_info.py
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
-# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_mariadbfirewallrule_info
-version_added: "2.9"
-short_description: Get Azure MariaDB Firewall Rule facts
-description:
- - Get facts of Azure MariaDB Firewall Rule.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- type: str
- server_name:
- description:
- - The name of the server.
- required: True
- type: str
- name:
- description:
- - The name of the server firewall rule.
- type: str
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
- - Matti Ranta (@techknowlogick)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of MariaDB Firewall Rule
- azure_rm_mariadbfirewallrule_info:
- resource_group: myResourceGroup
- server_name: server_name
- name: firewall_rule_name
-
- - name: List instances of MariaDB Firewall Rule
- azure_rm_mariadbfirewallrule_info:
- resource_group: myResourceGroup
- server_name: server_name
-'''
-
-RETURN = '''
-rules:
- description:
- - A list of dictionaries containing facts for MariaDB Firewall Rule.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/TestGroup/providers/Microsoft.DBforMariaDB/servers/testserver/fire
- wallRules/rule1"
- server_name:
- description:
- - The name of the server.
- returned: always
- type: str
- sample: testserver
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: rule1
- start_ip_address:
- description:
- - The start IP address of the MariaDB firewall rule.
- returned: always
- type: str
- sample: 10.0.0.16
- end_ip_address:
- description:
- - The end IP address of the MariaDB firewall rule.
- returned: always
- type: str
- sample: 10.0.0.18
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.azure_operation import AzureOperationPoller
- from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMMariaDbFirewallRuleInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.server_name = None
- self.name = None
- super(AzureRMMariaDbFirewallRuleInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_mariadbfirewallrule_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_mariadbfirewallrule_facts' module has been renamed to 'azure_rm_mariadbfirewallrule_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(MariaDBManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if (self.name is not None):
- self.results['rules'] = self.get()
- else:
- self.results['rules'] = self.list_by_server()
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.firewall_rules.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- firewall_rule_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for FirewallRules.')
-
- if response is not None:
- results.append(self.format_item(response))
-
- return results
-
- def list_by_server(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.firewall_rules.list_by_server(resource_group_name=self.resource_group,
- server_name=self.server_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for FirewallRules.')
-
- if response is not None:
- for item in response:
- results.append(self.format_item(item))
-
- return results
-
- def format_item(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.resource_group,
- 'id': d['id'],
- 'server_name': self.server_name,
- 'name': d['name'],
- 'start_ip_address': d['start_ip_address'],
- 'end_ip_address': d['end_ip_address']
- }
- return d
-
-
-def main():
- AzureRMMariaDbFirewallRuleInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_mariadbserver.py b/lib/ansible/modules/cloud/azure/azure_rm_mariadbserver.py
deleted file mode 100644
index 30a2998844..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_mariadbserver.py
+++ /dev/null
@@ -1,388 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_mariadbserver
-version_added: "2.8"
-short_description: Manage MariaDB Server instance
-description:
- - Create, update and delete instance of MariaDB Server.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- name:
- description:
- - The name of the server.
- required: True
- sku:
- description:
- - The SKU (pricing tier) of the server.
- suboptions:
- name:
- description:
- - The name of the SKU, typically tier + family + cores, for example C(B_Gen4_1), C(GP_Gen5_8).
- tier:
- description:
- - The tier of the particular SKU, for example C(Basic).
- choices:
- - basic
- - standard
- capacity:
- description:
- - The scale up/out capacity, representing the server's compute units.
- type: int
- size:
- description:
- - The size code, to be interpreted by resource as appropriate.
- location:
- description:
- - Resource location. If not set, location from the resource group will be used as default.
- storage_mb:
- description:
- - The maximum storage allowed for a server.
- type: int
- version:
- description:
- - Server version.
- choices:
- - 10.2
- enforce_ssl:
- description:
- - Enable SSL enforcement.
- type: bool
- default: False
- admin_username:
- description:
- - The administrator's login name of a server. Can only be specified when the server is being created (and is required for creation).
- admin_password:
- description:
- - The password of the administrator login.
- create_mode:
- description:
- - Create mode of MariaDB Server.
- default: Default
- state:
- description:
- - Assert the state of the MariaDB Server. Use C(present) to create or update a server and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
- - Matti Ranta (@techknowlogick)
-
-'''
-
-EXAMPLES = '''
- - name: Create (or update) MariaDB Server
- azure_rm_mariadbserver:
- resource_group: myResourceGroup
- name: testserver
- sku:
- name: B_Gen5_1
- tier: Basic
- location: eastus
- storage_mb: 1024
- enforce_ssl: True
- version: 10.2
- admin_username: cloudsa
- admin_password: password
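-
- # Illustrative example (hedged): delete the server, as described by the state option.
- - name: Delete MariaDB Server
- azure_rm_mariadbserver:
- resource_group: myResourceGroup
- name: testserver
- state: absent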
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/mariadbsrv1b6dd89593
-version:
- description:
- - Server version. Possible values include C(10.2).
- returned: always
- type: str
- sample: 10.2
-state:
- description:
- - A state of a server that is visible to the user. Possible values include C(Ready), C(Dropping), C(Disabled).
- returned: always
- type: str
- sample: Ready
-fully_qualified_domain_name:
- description:
- - The fully qualified domain name of a server.
- returned: always
- type: str
- sample: mariadbsrv1b6dd89593.mariadb.database.azure.com
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMMariaDbServers(AzureRMModuleBase):
- """Configuration class for an Azure RM MariaDB Server resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- sku=dict(
- type='dict'
- ),
- location=dict(
- type='str'
- ),
- storage_mb=dict(
- type='int'
- ),
- version=dict(
- type='str',
- choices=['10.2']
- ),
- enforce_ssl=dict(
- type='bool',
- default=False
- ),
- create_mode=dict(
- type='str',
- default='Default'
- ),
- admin_username=dict(
- type='str'
- ),
- admin_password=dict(
- type='str',
- no_log=True
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.name = None
- self.parameters = dict()
- self.tags = None
-
- self.results = dict(changed=False)
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMMariaDbServers, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- if key == "sku":
- ev = kwargs[key]
- if 'tier' in ev:
- if ev['tier'] == 'basic':
- ev['tier'] = 'Basic'
- elif ev['tier'] == 'standard':
- ev['tier'] = 'Standard'
- self.parameters["sku"] = ev
- elif key == "location":
- self.parameters["location"] = kwargs[key]
- elif key == "storage_mb":
- self.parameters.setdefault("properties", {}).setdefault("storage_profile", {})["storage_mb"] = kwargs[key]
- elif key == "version":
- self.parameters.setdefault("properties", {})["version"] = kwargs[key]
- elif key == "enforce_ssl":
- self.parameters.setdefault("properties", {})["ssl_enforcement"] = 'Enabled' if kwargs[key] else 'Disabled'
- elif key == "create_mode":
- self.parameters.setdefault("properties", {})["create_mode"] = kwargs[key]
- elif key == "admin_username":
- self.parameters.setdefault("properties", {})["administrator_login"] = kwargs[key]
- elif key == "admin_password":
- self.parameters.setdefault("properties", {})["administrator_login_password"] = kwargs[key]
-
- old_response = None
- response = None
-
- resource_group = self.get_resource_group(self.resource_group)
-
- if "location" not in self.parameters:
- self.parameters["location"] = resource_group.location
-
- old_response = self.get_mariadbserver()
-
- if not old_response:
- self.log("MariaDB Server instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("MariaDB Server instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.log("Need to check if MariaDB Server instance has to be deleted or may be updated")
- update_tags, newtags = self.update_tags(old_response.get('tags', {}))
- if update_tags:
- self.tags = newtags
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the MariaDB Server instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_mariadbserver()
-
- if not old_response:
- self.results['changed'] = True
- else:
- self.results['changed'] = old_response.__ne__(response)
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("MariaDB Server instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_mariadbserver()
- # make sure instance is actually deleted, for some Azure resources, instance is hanging around
- # for some time after deletion -- this should be really fixed in Azure
- while self.get_mariadbserver():
- time.sleep(20)
- else:
- self.log("MariaDB Server instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
- self.results["version"] = response["version"]
- self.results["state"] = response["user_visible_state"]
- self.results["fully_qualified_domain_name"] = response["fully_qualified_domain_name"]
-
- return self.results
-
- def create_update_mariadbserver(self):
- '''
- Creates or updates MariaDB Server with the specified configuration.
-
- :return: deserialized MariaDB Server instance state dictionary
- '''
- self.log("Creating / Updating the MariaDB Server instance {0}".format(self.name))
-
- try:
- self.parameters['tags'] = self.tags
- if self.to_do == Actions.Create:
- response = self.mariadb_client.servers.create(resource_group_name=self.resource_group,
- server_name=self.name,
- parameters=self.parameters)
- else:
- # structure of parameters for update must be changed
- self.parameters.update(self.parameters.pop("properties", {}))
- response = self.mariadb_client.servers.update(resource_group_name=self.resource_group,
- server_name=self.name,
- parameters=self.parameters)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the MariaDB Server instance.')
- self.fail("Error creating the MariaDB Server instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_mariadbserver(self):
- '''
- Deletes specified MariaDB Server instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the MariaDB Server instance {0}".format(self.name))
- try:
- response = self.mariadb_client.servers.delete(resource_group_name=self.resource_group,
- server_name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the MariaDB Server instance.')
- self.fail("Error deleting the MariaDB Server instance: {0}".format(str(e)))
-
- return True
-
- def get_mariadbserver(self):
- '''
- Gets the properties of the specified MariaDB Server.
-
- :return: deserialized MariaDB Server instance state dictionary
- '''
- self.log("Checking if the MariaDB Server instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mariadb_client.servers.get(resource_group_name=self.resource_group,
- server_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("MariaDB Server instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the MariaDB Server instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMMariaDbServers()
-
-
-if __name__ == '__main__':
- main()
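The RETURN block above documents id, version, state and fully_qualified_domain_name, so the server endpoint can be picked up from a registered result. A minimal illustrative sketch, mirroring the module's own example; the resource names, credentials and registered variable are placeholders:

  - name: Create MariaDB server and capture the result
    azure_rm_mariadbserver:
      resource_group: myResourceGroup
      name: testserver
      sku:
        name: B_Gen5_1
        tier: Basic
      location: eastus
      storage_mb: 1024
      version: 10.2
      admin_username: cloudsa
      admin_password: password
    register: mariadb_server

  - name: Show the server endpoint
    debug:
      msg: "{{ mariadb_server.fully_qualified_domain_name }}"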
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_mariadbserver_info.py b/lib/ansible/modules/cloud/azure/azure_rm_mariadbserver_info.py
deleted file mode 100644
index ffe52c5d37..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_mariadbserver_info.py
+++ /dev/null
@@ -1,264 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-# Copyright (c) 2019 Matti Ranta, (@techknowlogick)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_mariadbserver_info
-version_added: "2.9"
-short_description: Get Azure MariaDB Server facts
-description:
- - Get facts of MariaDB Server.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- type: str
- name:
- description:
- - The name of the server.
- type: str
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- type: list
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
- - Matti Ranta (@techknowlogick)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of MariaDB Server
- azure_rm_mariadbserver_info:
- resource_group: myResourceGroup
- name: server_name
-
- - name: List instances of MariaDB Server
- azure_rm_mariadbserver_info:
- resource_group: myResourceGroup
-'''
-
-RETURN = '''
-servers:
- description:
- - A list of dictionaries containing facts for MariaDB servers.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMariaDB/servers/myabdud1223
- resource_group:
- description:
- - Resource group name.
- returned: always
- type: str
- sample: myResourceGroup
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: myabdud1223
- location:
- description:
- - The location the resource resides in.
- returned: always
- type: str
- sample: eastus
- sku:
- description:
- - The SKU of the server.
- returned: always
- type: complex
- contains:
- name:
- description:
- - The name of the SKU.
- returned: always
- type: str
- sample: GP_Gen4_2
- tier:
- description:
- - The tier of the particular SKU.
- returned: always
- type: str
- sample: GeneralPurpose
- capacity:
- description:
- - The scale capacity.
- returned: always
- type: int
- sample: 2
- storage_mb:
- description:
- - The maximum storage allowed for a server.
- returned: always
- type: int
- sample: 128000
- enforce_ssl:
- description:
- - Enable SSL enforcement.
- returned: always
- type: bool
- sample: False
- admin_username:
- description:
- - The administrator's login name of a server.
- returned: always
- type: str
- sample: serveradmin
- version:
- description:
- - Server version.
- returned: always
- type: str
- sample: "9.6"
- user_visible_state:
- description:
- - A state of a server that is visible to the user.
- returned: always
- type: str
- sample: Ready
- fully_qualified_domain_name:
- description:
- - The fully qualified domain name of a server.
- returned: always
- type: str
- sample: myabdud1223.mariadb.database.azure.com
- tags:
- description:
- - Tags assigned to the resource. Dictionary of string:string pairs.
- type: dict
- sample: { tag1: abc }
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.rdbms.mariadb import MariaDBManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMMariaDbServerInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.resource_group = None
- self.name = None
- self.tags = None
- super(AzureRMMariaDbServerInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_mariadbserver_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_mariadbserver_facts' module has been renamed to 'azure_rm_mariadbserver_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if (self.resource_group is not None and
- self.name is not None):
- self.results['servers'] = self.get()
- elif (self.resource_group is not None):
- self.results['servers'] = self.list_by_resource_group()
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mariadb_client.servers.get(resource_group_name=self.resource_group,
- server_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for MariaDB Server.')
-
- if response and self.has_tags(response.tags, self.tags):
- results.append(self.format_item(response))
-
- return results
-
- def list_by_resource_group(self):
- response = None
- results = []
- try:
- response = self.mariadb_client.servers.list_by_resource_group(resource_group_name=self.resource_group)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for MariaDB Servers.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_item(item))
-
- return results
-
- def format_item(self, item):
- d = item.as_dict()
- d = {
- 'id': d['id'],
- 'resource_group': self.resource_group,
- 'name': d['name'],
- 'sku': d['sku'],
- 'location': d['location'],
- 'storage_mb': d['storage_profile']['storage_mb'],
- 'version': d['version'],
- 'enforce_ssl': (d['ssl_enforcement'] == 'Enabled'),
- 'admin_username': d['administrator_login'],
- 'user_visible_state': d['user_visible_state'],
- 'fully_qualified_domain_name': d['fully_qualified_domain_name'],
- 'tags': d.get('tags')
- }
-
- return d
-
-
-def main():
- AzureRMMariaDbServerInfo()
-
-
-if __name__ == '__main__':
- main()
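Since the tags option filters on resource tags ('key' or 'key:value') and the servers list in RETURN is a plain list of dictionaries, results can be iterated directly. A small illustrative sketch; the resource group, tag and registered variable names are placeholders:

  - name: List MariaDB servers carrying a given tag
    azure_rm_mariadbserver_info:
      resource_group: myResourceGroup
      tags:
        - env:production
    register: mariadb_info

  - name: Report each server endpoint
    debug:
      msg: "{{ item.name }} -> {{ item.fully_qualified_domain_name }}"
    loop: "{{ mariadb_info.servers }}"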
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_monitorlogprofile.py b/lib/ansible/modules/cloud/azure/azure_rm_monitorlogprofile.py
deleted file mode 100644
index a27ac834a7..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_monitorlogprofile.py
+++ /dev/null
@@ -1,392 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yunge Zhu, <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_monitorlogprofile
-version_added: "2.9"
-short_description: Manage Azure Monitor log profile
-description:
- - Create, update and delete instance of Azure Monitor log profile.
-
-options:
- name:
- description:
- - Unique name of the log profile to create or update.
- required: True
- type: str
- location:
- description:
- - Resource location.
- type: str
- locations:
- description:
- - List of regions for which Activity Log events should be stored.
- type: list
- categories:
- description:
- - List of categories of logs. These categories are created as is convenient to the user. Some values are C(Write), C(Delete) and/or C(Action).
- type: list
- retention_policy:
- description:
- - Retention policy for events in the log.
- type: dict
- suboptions:
- enabled:
- description:
- - Whether the retention policy is enabled.
- type: bool
- days:
- description:
- - The number of days for the retention. A value of 0 will retain the events indefinitely.
- type: int
- service_bus_rule_id:
- description:
- - The service bus rule ID of the service bus namespace in which you would like to have Event Hubs created for streaming in the Activity Log.
- - Format like {service_bus_resource_id}/authorizationrules/{key_name}.
- type: str
- storage_account:
- description:
- - The storage account to which send the Activity Log.
- - It could be a resource ID.
- - It could be a dict containing I(resource_group) and I(name).
- type: raw
- state:
- description:
- - Assert the state of the log profile.
- - Use C(present) to create or update a log profile and C(absent) to delete it.
- default: present
- type: str
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yunge Zhu (@yungezz)
-
-'''
-
-EXAMPLES = '''
- - name: Create a log profile
- azure_rm_monitorlogprofile:
- name: myProfile
- location: eastus
- locations:
- - eastus
- - westus
- categories:
- - Write
- - Action
- retention_policy:
- enabled: False
- days: 1
- storage_account:
- resource_group: myResourceGroup
- name: myStorageAccount
- register: output
-
- - name: Delete a log profile
- azure_rm_monitorlogprofile:
- name: myProfile
- state: absent
-'''
-
-RETURN = '''
-id:
- description:
- - ID of the log profile.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/microsoft.insights/logprofiles/myProfile
-
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.azure_operation import AzureOperationPoller
- from msrestazure.tools import is_valid_resource_id
- from msrest.serialization import Model
- from azure.mgmt.monitor.models import (RetentionPolicy, LogProfileResource, ErrorResponseException)
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-retention_policy_spec = dict(
- enabled=dict(type='bool'),
- days=dict(type='int')
-)
-
-
-def logprofile_to_dict(profile):
- return dict(
- id=profile.id,
- name=profile.name,
- location=profile.location,
- locations=profile.locations,
- categories=profile.categories,
- storage_account=profile.storage_account_id,
- service_bus_rule_id=profile.service_bus_rule_id,
- retention_policy=dict(
- enabled=profile.retention_policy.enabled,
- days=profile.retention_policy.days
- ),
- tags=profile.tags if profile.tags else None
- )
-
-
-class Actions:
- NoAction, CreateOrUpdate, Delete = range(3)
-
-
-class AzureRMMonitorLogprofile(AzureRMModuleBase):
- """Configuration class for an Azure RM Monitor log profile"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- locations=dict(
- type='list',
- elements='str'
- ),
- categories=dict(
- type='list',
- elements='str'
- ),
- retention_policy=dict(
- type='dict',
- options=retention_policy_spec
- ),
- service_bus_rule_id=dict(
- type='str'
- ),
- storage_account=dict(
- type='raw'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self._client = None
-
- self.name = None
- self.location = None
-
- self.locations = None
- self.categories = None
- self.retention_policy = False
- self.service_bus_rule_id = None
- self.storage_account = None
-
- self.tags = None
-
- self.results = dict(
- changed=False,
- id=None
- )
- self.state = None
-
- super(AzureRMMonitorLogprofile, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- old_response = None
- response = None
- to_be_updated = False
-
- # get storage account id
- if self.storage_account:
- if isinstance(self.storage_account, dict):
- self.storage_account = format_resource_id(val=self.storage_account['name'],
- subscription_id=self.storage_account.get('subscription') or self.subscription_id,
- namespace='Microsoft.Storage',
- types='storageAccounts',
- resource_group=self.storage_account.get('resource_group'))
- elif not is_valid_resource_id(self.storage_account):
- self.fail("storage_account either be a resource id or a dict containing resource_group and name")
-
- # get existing log profile
- old_response = self.get_logprofile()
-
- if old_response:
- self.results['id'] = old_response['id']
-
- if self.state == 'present':
- # if profile not exists, create new
- if not old_response:
- self.log("Log profile instance doesn't exist")
-
- to_be_updated = True
- self.to_do = Actions.CreateOrUpdate
-
- else:
- # log profile exists already, do update
- self.log("Log profile instance already exists")
-
- update_tags, self.tags = self.update_tags(old_response.get('tags', None))
-
- if update_tags:
- to_be_updated = True
- self.to_do = Actions.CreateOrUpdate
-
- # check if update
- if self.check_update(old_response):
- to_be_updated = True
- self.to_do = Actions.CreateOrUpdate
-
- elif self.state == 'absent':
- if old_response:
- self.log("Delete log profile instance")
- self.results['id'] = old_response['id']
- to_be_updated = True
- self.to_do = Actions.Delete
- else:
- self.results['changed'] = False
- self.log("Log profile {0} not exists.".format(self.name))
-
- if to_be_updated:
- self.log('Need to Create/Update log profile')
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- if self.to_do == Actions.CreateOrUpdate:
- response = self.create_or_update_logprofile()
- self.results['id'] = response['id']
-
- if self.to_do == Actions.Delete:
- self.delete_logprofile()
- self.log('Log profile instance deleted')
-
- return self.results
-
- def check_update(self, existing):
- if self.locations is not None and existing['locations'] != self.locations:
- self.log("locations diff: origin {0} / update {1}".format(existing['locations'], self.locations))
- return True
- if self.retention_policy is not None:
- if existing['retention_policy']['enabled'] != self.retention_policy['enabled']:
- self.log("retention_policy diff: origin {0} / update {1}".format(str(existing['sku']['name']), str(self.retention_policy['enabled'])))
- return True
- if existing['retention_policy']['days'] != self.retention_policy['days']:
- self.log("retention_policy diff: origin {0} / update {1}".format(existing['retention_policy']['days'], str(self.retention_policy['days'])))
- return True
- if self.storage_account is not None and existing['storage_account'] != self.storage_account:
- self.log("storage_account diff: origin {0} / update {1}".format(existing['storage_account'], self.storage_account))
- return True
- if self.service_bus_rule_id is not None and existing['service_bus_rule_id'] != self.service_bus_rule_id:
- self.log("service_bus_rule_id diff: origin {0} / update {1}".format(existing['service_bus_rule_id'], self.service_bus_rule_id))
- return True
- return False
-
- def create_or_update_logprofile(self):
- '''
- Creates or updates the log profile.
-
- :return: deserialized log profile state dictionary
- '''
- self.log(
- "Creating log profile instance {0}".format(self.name))
-
- try:
- params = LogProfileResource(
- location=self.location,
- locations=self.locations,
- categories=self.categories,
- retention_policy=RetentionPolicy(days=self.retention_policy['days'],
- enabled=self.retention_policy['enabled']) if self.retention_policy else None,
- storage_account_id=self.storage_account if self.storage_account else None,
- service_bus_rule_id=self.service_bus_rule_id if self.service_bus_rule_id else None,
- tags=self.tags
- )
-
- response = self.monitor_client.log_profiles.create_or_update(log_profile_name=self.name,
- parameters=params)
- if isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create/update log profile.')
- self.fail("Error creating/updating log profile: {0}".format(str(exc)))
- return logprofile_to_dict(response)
-
- def delete_logprofile(self):
- '''
- Deletes specified log profile.
-
- :return: True
- '''
- self.log("Deleting the log profile instance {0}".format(self.name))
- try:
- response = self.monitor_client.log_profiles.delete(log_profile_name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the log profile.')
- self.fail(
- "Error deleting the log profile: {0}".format(str(e)))
- return True
-
- def get_logprofile(self):
- '''
- Gets the properties of the specified log profile.
-
- :return: log profile state dictionary
- '''
- self.log("Checking if the log profile {0} is present".format(self.name))
-
- response = None
-
- try:
- response = self.monitor_client.log_profiles.get(log_profile_name=self.name)
-
- self.log("Response : {0}".format(response))
- self.log("log profile : {0} found".format(response.name))
- return logprofile_to_dict(response)
-
- except ErrorResponseException as ex:
- self.log("Didn't find log profile {0}".format(self.name))
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMMonitorLogprofile()
-
-
-if __name__ == '__main__':
- main()
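Because storage_account accepts either a dict or a full resource ID, an existing storage account can also be referenced by ID rather than by resource_group/name. A minimal sketch under that assumption; the subscription ID, account name and retention values are placeholders:

  - name: Create a log profile pointing at a storage account by resource ID
    azure_rm_monitorlogprofile:
      name: myProfile
      location: eastus
      locations:
        - eastus
      categories:
        - Write
        - Delete
        - Action
      retention_policy:
        enabled: True
        days: 30
      storage_account: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/myStorageAccount"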
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_mysqlconfiguration.py b/lib/ansible/modules/cloud/azure/azure_rm_mysqlconfiguration.py
deleted file mode 100644
index 6021e87319..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_mysqlconfiguration.py
+++ /dev/null
@@ -1,240 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_mysqlconfiguration
-version_added: "2.8"
-short_description: Manage MySQL Configuration instance
-description:
- - Create, update and delete instance of MySQL Configuration.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource.
- required: True
- server_name:
- description:
- - The name of the server.
- required: True
- name:
- description:
- - The name of the server configuration.
- required: True
- value:
- description:
- - Value of the configuration.
- state:
- description:
- - Assert the state of the MySQL configuration. Use C(present) to update setting, or C(absent) to reset to default value.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Update MySQL Server setting
- azure_rm_mysqlconfiguration:
- resource_group: myResourceGroup
- server_name: myServer
- name: event_scheduler
- value: "ON"
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/myServer/confi
- gurations/event_scheduler"
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from azure.mgmt.rdbms.mysql import MySQLManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMMySqlConfiguration(AzureRMModuleBase):
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- value=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.server_name = None
- self.name = None
- self.value = None
-
- self.results = dict(changed=False)
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMMySqlConfiguration, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
-
- old_response = None
- response = None
-
- old_response = self.get_configuration()
-
- if not old_response:
- self.log("Configuration instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("Configuration instance already exists")
- if self.state == 'absent' and old_response['source'] == 'user-override':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.log("Need to check if Configuration instance has to be deleted or may be updated")
- if self.value != old_response.get('value'):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the Configuration instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_configuration()
-
- self.results['changed'] = True
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("Configuration instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_configuration()
- else:
- self.log("Configuration instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
-
- return self.results
-
- def create_update_configuration(self):
- self.log("Creating / Updating the Configuration instance {0}".format(self.name))
-
- try:
- response = self.mysql_client.configurations.create_or_update(resource_group_name=self.resource_group,
- server_name=self.server_name,
- configuration_name=self.name,
- value=self.value,
- source='user-override')
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the Configuration instance.')
- self.fail("Error creating the Configuration instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_configuration(self):
- self.log("Deleting the Configuration instance {0}".format(self.name))
- try:
- response = self.mysql_client.configurations.create_or_update(resource_group_name=self.resource_group,
- server_name=self.server_name,
- configuration_name=self.name,
- source='system-default')
- except CloudError as e:
- self.log('Error attempting to delete the Configuration instance.')
- self.fail("Error deleting the Configuration instance: {0}".format(str(e)))
-
- return True
-
- def get_configuration(self):
- self.log("Checking if the Configuration instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mysql_client.configurations.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- configuration_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("Configuration instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the Configuration instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMMySqlConfiguration()
-
-
-if __name__ == '__main__':
- main()
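Because state: absent resets a setting to its server default (source system-default) rather than removing it, a reset task looks just like an update without a value. A short illustrative sketch; the server and resource group names are placeholders:

  - name: Reset event_scheduler to its default value
    azure_rm_mysqlconfiguration:
      resource_group: myResourceGroup
      server_name: myServer
      name: event_scheduler
      state: absent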
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_mysqlconfiguration_info.py b/lib/ansible/modules/cloud/azure/azure_rm_mysqlconfiguration_info.py
deleted file mode 100644
index 8fe9113527..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_mysqlconfiguration_info.py
+++ /dev/null
@@ -1,214 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_mysqlconfiguration_info
-version_added: "2.9"
-short_description: Get Azure MySQL Configuration facts
-description:
- - Get facts of Azure MySQL Configuration.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- type: str
- server_name:
- description:
- - The name of the server.
- required: True
- type: str
- name:
- description:
- - Setting name.
- type: str
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get specific setting of MySQL Server
- azure_rm_mysqlconfiguration_info:
- resource_group: myResourceGroup
- server_name: testmysqlserver
- name: deadlock_timeout
-
- - name: Get all settings of MySQL Server
- azure_rm_mysqlconfiguration_info:
- resource_group: myResourceGroup
- server_name: server_name
-'''
-
-RETURN = '''
-settings:
- description:
- - A list of dictionaries containing MySQL Server settings.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Setting resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/testmysqlser
- ver/configurations/deadlock_timeout"
- name:
- description:
- - Setting name.
- returned: always
- type: str
- sample: deadlock_timeout
- value:
- description:
- - Setting value.
- returned: always
- type: raw
- sample: 1000
- description:
- description:
- - Description of the configuration.
- returned: always
- type: str
- sample: Deadlock timeout.
- source:
- description:
- - Source of the configuration.
- returned: always
- type: str
- sample: system-default
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.azure_operation import AzureOperationPoller
- from azure.mgmt.rdbms.mysql import MySQLManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMMySqlConfigurationInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
- # store the results of the module operation
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.resource_group = None
- self.server_name = None
- self.name = None
- super(AzureRMMySqlConfigurationInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_mysqlconfiguration_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_mysqlconfiguration_facts' module has been renamed to 'azure_rm_mysqlconfiguration_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(MySQLManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if self.name is not None:
- self.results['settings'] = self.get()
- else:
- self.results['settings'] = self.list_by_server()
- return self.results
-
- def get(self):
- '''
- Gets facts of the specified MySQL Configuration.
-
- :return: deserialized MySQL Configuration instance state dictionary
- '''
- response = None
- results = []
- try:
- response = self.mgmt_client.configurations.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- configuration_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Configurations.')
-
- if response is not None:
- results.append(self.format_item(response))
-
- return results
-
- def list_by_server(self):
- '''
- Gets facts of all MySQL Configurations of the specified server.
-
- :return: deserialized MySQL Configuration instance state dictionary
- '''
- response = None
- results = []
- try:
- response = self.mgmt_client.configurations.list_by_server(resource_group_name=self.resource_group,
- server_name=self.server_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Configurations.')
-
- if response is not None:
- for item in response:
- results.append(self.format_item(item))
-
- return results
-
- def format_item(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.resource_group,
- 'server_name': self.server_name,
- 'id': d['id'],
- 'name': d['name'],
- 'value': d['value'],
- 'description': d['description'],
- 'source': d['source']
- }
- return d
-
-
-def main():
- AzureRMMySqlConfigurationInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_mysqldatabase.py b/lib/ansible/modules/cloud/azure/azure_rm_mysqldatabase.py
deleted file mode 100644
index 435b495855..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_mysqldatabase.py
+++ /dev/null
@@ -1,302 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_mysqldatabase
-version_added: "2.5"
-short_description: Manage MySQL Database instance
-description:
- - Create, update and delete instance of MySQL Database.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- server_name:
- description:
- - The name of the server.
- required: True
- name:
- description:
- - The name of the database.
- required: True
- charset:
- description:
- - The charset of the database. Check MySQL documentation for possible values.
- - This is only set on creation; use I(force_update) to recreate the database if the values don't match.
- collation:
- description:
- - The collation of the database. Check MySQL documentation for possible values.
- - This is only set on creation; use I(force_update) to recreate the database if the values don't match.
- force_update:
- description:
- - When set to C(true), will delete and recreate the existing MySQL database if any of the properties don't match what is set.
- - When set to C(false), no change will occur to the database even if any of the properties do not match.
- type: bool
- default: 'no'
- state:
- description:
- - Assert the state of the MySQL Database. Use C(present) to create or update a database and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Create (or update) MySQL Database
- azure_rm_mysqldatabase:
- resource_group: myResourceGroup
- server_name: testserver
- name: db1
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/testserver/databases/db1
-name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: db1
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from azure.mgmt.rdbms.mysql import MySQLManagementClient
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMMySqlDatabase(AzureRMModuleBase):
- """Configuration class for an Azure RM MySQL Database resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- charset=dict(
- type='str'
- ),
- collation=dict(
- type='str'
- ),
- force_update=dict(
- type='bool',
- default=False
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.server_name = None
- self.name = None
- self.force_update = None
- self.parameters = dict()
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMMySqlDatabase, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- if key == "charset":
- self.parameters["charset"] = kwargs[key]
- elif key == "collation":
- self.parameters["collation"] = kwargs[key]
-
- old_response = None
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(MySQLManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- resource_group = self.get_resource_group(self.resource_group)
-
- old_response = self.get_mysqldatabase()
-
- if not old_response:
- self.log("MySQL Database instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("MySQL Database instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.log("Need to check if MySQL Database instance has to be deleted or may be updated")
- if ('collation' in self.parameters) and (self.parameters['collation'] != old_response['collation']):
- self.to_do = Actions.Update
- if ('charset' in self.parameters) and (self.parameters['charset'] != old_response['charset']):
- self.to_do = Actions.Update
- if self.to_do == Actions.Update:
- if self.force_update:
- if not self.check_mode:
- self.delete_mysqldatabase()
- else:
- self.fail("Database properties cannot be updated without setting 'force_update' option")
- self.to_do = Actions.NoAction
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the MySQL Database instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_mysqldatabase()
- self.results['changed'] = True
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("MySQL Database instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_mysqldatabase()
- # make sure instance is actually deleted, for some Azure resources, instance is hanging around
- # for some time after deletion -- this should be really fixed in Azure
- while self.get_mysqldatabase():
- time.sleep(20)
- else:
- self.log("MySQL Database instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
- self.results["name"] = response["name"]
-
- return self.results
-
- def create_update_mysqldatabase(self):
- '''
- Creates or updates MySQL Database with the specified configuration.
-
- :return: deserialized MySQL Database instance state dictionary
- '''
- self.log("Creating / Updating the MySQL Database instance {0}".format(self.name))
-
- try:
- response = self.mgmt_client.databases.create_or_update(resource_group_name=self.resource_group,
- server_name=self.server_name,
- database_name=self.name,
- parameters=self.parameters)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the MySQL Database instance.')
- self.fail("Error creating the MySQL Database instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_mysqldatabase(self):
- '''
- Deletes specified MySQL Database instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the MySQL Database instance {0}".format(self.name))
- try:
- response = self.mgmt_client.databases.delete(resource_group_name=self.resource_group,
- server_name=self.server_name,
- database_name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the MySQL Database instance.')
- self.fail("Error deleting the MySQL Database instance: {0}".format(str(e)))
-
- return True
-
- def get_mysqldatabase(self):
- '''
- Gets the properties of the specified MySQL Database.
-
- :return: deserialized MySQL Database instance state dictionary
- '''
- self.log("Checking if the MySQL Database instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mgmt_client.databases.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- database_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("MySQL Database instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the MySQL Database instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMMySqlDatabase()
-
-
-if __name__ == '__main__':
- main()
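Since charset and collation are applied only at creation time, changing them on an existing database requires force_update, which deletes and recreates the database along with its data. A cautionary illustrative sketch with placeholder names:

  - name: Recreate MySQL database with an explicit charset and collation
    azure_rm_mysqldatabase:
      resource_group: myResourceGroup
      server_name: testserver
      name: db1
      charset: utf8
      collation: utf8_general_ci
      force_update: yes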
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_mysqldatabase_info.py b/lib/ansible/modules/cloud/azure/azure_rm_mysqldatabase_info.py
deleted file mode 100644
index 988a417656..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_mysqldatabase_info.py
+++ /dev/null
@@ -1,209 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_mysqldatabase_info
-version_added: "2.9"
-short_description: Get Azure MySQL Database facts
-description:
- - Get facts of MySQL Database.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- type: str
- server_name:
- description:
- - The name of the server.
- required: True
- type: str
- name:
- description:
- - The name of the database.
- type: str
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of MySQL Database
- azure_rm_mysqldatabase_info:
- resource_group: myResourceGroup
- server_name: server_name
- name: database_name
-
- - name: List instances of MySQL Database
- azure_rm_mysqldatabase_info:
- resource_group: myResourceGroup
- server_name: server_name
-'''
-
-RETURN = '''
-databases:
- description:
- - A list of dictionaries containing facts for MySQL Databases.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/testser
- ver/databases/db1"
- resource_group:
- description:
- - Resource group name.
- returned: always
- type: str
- sample: testrg
- server_name:
- description:
- - Server name.
- returned: always
- type: str
- sample: testserver
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: db1
- charset:
- description:
- - The charset of the database.
- returned: always
- type: str
- sample: utf8
- collation:
- description:
- - The collation of the database.
- returned: always
- type: str
- sample: utf8_general_ci
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.rdbms.mysql import MySQLManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMMySqlDatabaseInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.resource_group = None
- self.server_name = None
- self.name = None
- super(AzureRMMySqlDatabaseInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_mysqldatabase_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_mysqldatabase_facts' module has been renamed to 'azure_rm_mysqldatabase_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if (self.resource_group is not None and
- self.server_name is not None and
- self.name is not None):
- self.results['databases'] = self.get()
- elif (self.resource_group is not None and
- self.server_name is not None):
- self.results['databases'] = self.list_by_server()
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mysql_client.databases.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- database_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Databases.')
-
- if response is not None:
- results.append(self.format_item(response))
-
- return results
-
- def list_by_server(self):
- response = None
- results = []
- try:
- response = self.mysql_client.databases.list_by_server(resource_group_name=self.resource_group,
- server_name=self.server_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e)))
-
- if response is not None:
- for item in response:
- results.append(self.format_item(item))
-
- return results
-
- def format_item(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.resource_group,
- 'server_name': self.server_name,
- 'name': d['name'],
- 'charset': d['charset'],
- 'collation': d['collation']
- }
- return d
-
-
-def main():
- AzureRMMySqlDatabaseInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_mysqlfirewallrule.py b/lib/ansible/modules/cloud/azure/azure_rm_mysqlfirewallrule.py
deleted file mode 100644
index c4a176c4cb..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_mysqlfirewallrule.py
+++ /dev/null
@@ -1,277 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_mysqlfirewallrule
-version_added: "2.8"
-short_description: Manage MySQL firewall rule instance
-description:
- - Create, update and delete instance of MySQL firewall rule.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- server_name:
- description:
- - The name of the server.
- required: True
- name:
- description:
- - The name of the MySQL firewall rule.
- required: True
- start_ip_address:
- description:
- - The start IP address of the MySQL firewall rule. Must be IPv4 format.
- required: True
- end_ip_address:
- description:
- - The end IP address of the MySQL firewall rule. Must be IPv4 format.
- required: True
- state:
- description:
- - Assert the state of the MySQL firewall rule. Use C(present) to create or update a rule and C(absent) to ensure it is not present.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Create (or update) MySQL firewall rule
- azure_rm_mysqlfirewallrule:
- resource_group: myResourceGroup
- server_name: testserver
- name: rule1
- start_ip_address: 10.0.0.17
- end_ip_address: 10.0.0.20
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/testserver/fire
- wallRules/rule1"
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from azure.mgmt.rdbms.mysql import MySQLManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMMySqlFirewallRule(AzureRMModuleBase):
- """Configuration class for an Azure RM MySQL firewall rule resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- start_ip_address=dict(
- type='str'
- ),
- end_ip_address=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.server_name = None
- self.name = None
- self.start_ip_address = None
- self.end_ip_address = None
-
- self.results = dict(changed=False)
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMMySqlFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
-
- old_response = None
- response = None
-
- resource_group = self.get_resource_group(self.resource_group)
-
- old_response = self.get_firewallrule()
-
- if not old_response:
- self.log("MySQL firewall rule instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("MySQL firewall rule instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.log("Need to check if MySQL firewall rule instance has to be deleted or may be updated")
- if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']):
- self.to_do = Actions.Update
- if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the MySQL firewall rule instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_firewallrule()
-
- if not old_response:
- self.results['changed'] = True
- else:
- self.results['changed'] = old_response.__ne__(response)
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("MySQL firewall rule instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_firewallrule()
- # make sure instance is actually deleted, for some Azure resources, instance is hanging around
- # for some time after deletion -- this should be really fixed in Azure
- while self.get_firewallrule():
- time.sleep(20)
- else:
- self.log("MySQL firewall rule instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
-
- return self.results
-
- def create_update_firewallrule(self):
- '''
- Creates or updates MySQL firewall rule with the specified configuration.
-
- :return: deserialized MySQL firewall rule instance state dictionary
- '''
- self.log("Creating / Updating the MySQL firewall rule instance {0}".format(self.name))
-
- try:
- response = self.mysql_client.firewall_rules.create_or_update(resource_group_name=self.resource_group,
- server_name=self.server_name,
- firewall_rule_name=self.name,
- start_ip_address=self.start_ip_address,
- end_ip_address=self.end_ip_address)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the MySQL firewall rule instance.')
- self.fail("Error creating the MySQL firewall rule instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_firewallrule(self):
- '''
- Deletes specified MySQL firewall rule instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the MySQL firewall rule instance {0}".format(self.name))
- try:
- response = self.mysql_client.firewall_rules.delete(resource_group_name=self.resource_group,
- server_name=self.server_name,
- firewall_rule_name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the MySQL firewall rule instance.')
- self.fail("Error deleting the MySQL firewall rule instance: {0}".format(str(e)))
-
- return True
-
- def get_firewallrule(self):
- '''
- Gets the properties of the specified MySQL firewall rule.
-
- :return: deserialized MySQL firewall rule instance state dictionary
- '''
- self.log("Checking if the MySQL firewall rule instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mysql_client.firewall_rules.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- firewall_rule_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("MySQL firewall rule instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the MySQL firewall rule instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMMySqlFirewallRule()
-
-
-if __name__ == '__main__':
- main()
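The firewall-rule module deleted above continues under the azure.azcollection collection noted in this commit. A minimal task sketch of the equivalent call, matching the argument spec in the deleted file (the fully qualified module name is assumed to stay azure_rm_mysqlfirewallrule; resource names are placeholders):

# Sketch only: open a client IP range on an Azure MySQL server.
- name: Create (or update) a MySQL firewall rule
  azure.azcollection.azure_rm_mysqlfirewallrule:
    resource_group: myResourceGroup   # placeholder
    server_name: testserver           # placeholder
    name: rule1
    start_ip_address: 10.0.0.16
    end_ip_address: 10.0.0.18
    state: present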
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_mysqlfirewallrule_info.py b/lib/ansible/modules/cloud/azure/azure_rm_mysqlfirewallrule_info.py
deleted file mode 100644
index 9fa760ecdb..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_mysqlfirewallrule_info.py
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_mysqlfirewallrule_info
-version_added: "2.9"
-short_description: Get Azure MySQL Firewall Rule facts
-description:
- - Get facts of Azure MySQL Firewall Rule.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- type: str
- server_name:
- description:
- - The name of the server.
- required: True
- type: str
- name:
- description:
- - The name of the server firewall rule.
- type: str
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of MySQL Firewall Rule
- azure_rm_mysqlfirewallrule_info:
- resource_group: myResourceGroup
- server_name: server_name
- name: firewall_rule_name
-
- - name: List instances of MySQL Firewall Rule
- azure_rm_mysqlfirewallrule_info:
- resource_group: myResourceGroup
- server_name: server_name
-'''
-
-RETURN = '''
-rules:
- description:
- - A list of dictionaries containing facts for MySQL Firewall Rule.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/TestGroup/providers/Microsoft.DBforMySQL/servers/testserver/fire
- wallRules/rule1"
- server_name:
- description:
- - The name of the server.
- returned: always
- type: str
- sample: testserver
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: rule1
- start_ip_address:
- description:
- - The start IP address of the MySQL firewall rule.
- returned: always
- type: str
- sample: 10.0.0.16
- end_ip_address:
- description:
- - The end IP address of the MySQL firewall rule.
- returned: always
- type: str
- sample: 10.0.0.18
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.azure_operation import AzureOperationPoller
- from azure.mgmt.rdbms.mysql import MySQLManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMMySqlFirewallRuleInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.server_name = None
- self.name = None
- super(AzureRMMySqlFirewallRuleInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_mysqlfirewallrule_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_mysqlfirewallrule_facts' module has been renamed to 'azure_rm_mysqlfirewallrule_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(MySQLManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if (self.name is not None):
- self.results['rules'] = self.get()
- else:
- self.results['rules'] = self.list_by_server()
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.firewall_rules.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- firewall_rule_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for FirewallRules.')
-
- if response is not None:
- results.append(self.format_item(response))
-
- return results
-
- def list_by_server(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.firewall_rules.list_by_server(resource_group_name=self.resource_group,
- server_name=self.server_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for FirewallRules.')
-
- if response is not None:
- for item in response:
- results.append(self.format_item(item))
-
- return results
-
- def format_item(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.resource_group,
- 'id': d['id'],
- 'server_name': self.server_name,
- 'name': d['name'],
- 'start_ip_address': d['start_ip_address'],
- 'end_ip_address': d['end_ip_address']
- }
- return d
-
-
-def main():
- AzureRMMySqlFirewallRuleInfo()
-
-
-if __name__ == '__main__':
- main()
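Likewise for the facts module deleted above; a short sketch of fetching one rule or all rules on a server (collection module name assumed; names are placeholders):

# Sketch only: query firewall rules through the documented rules return key.
- name: Get a single MySQL firewall rule
  azure.azcollection.azure_rm_mysqlfirewallrule_info:
    resource_group: myResourceGroup
    server_name: testserver
    name: rule1
  register: single_rule

- name: List all MySQL firewall rules on a server
  azure.azcollection.azure_rm_mysqlfirewallrule_info:
    resource_group: myResourceGroup
    server_name: testserver
  register: all_rules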
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_mysqlserver.py b/lib/ansible/modules/cloud/azure/azure_rm_mysqlserver.py
deleted file mode 100644
index b9a045860d..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_mysqlserver.py
+++ /dev/null
@@ -1,386 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_mysqlserver
-version_added: "2.5"
-short_description: Manage MySQL Server instance
-description:
- - Create, update and delete instance of MySQL Server.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- name:
- description:
- - The name of the server.
- required: True
- sku:
- description:
- - The SKU (pricing tier) of the server.
- suboptions:
- name:
- description:
- - The name of the SKU, typically tier + family + cores, for example C(B_Gen4_1), C(GP_Gen5_8).
- tier:
- description:
- - The tier of the particular SKU, for example C(Basic).
- choices:
- - basic
- - standard
- capacity:
- description:
- - The scale up/out capacity, representing the server's compute units.
- size:
- description:
- - The size code, to be interpreted by resource as appropriate.
- location:
- description:
- - Resource location. If not set, location from the resource group will be used as default.
- storage_mb:
- description:
- - The maximum storage allowed for a server.
- type: int
- version:
- description:
- - Server version.
- choices:
- - 5.6
- - 5.7
- enforce_ssl:
- description:
- - Enable SSL enforcement.
- type: bool
- default: False
- admin_username:
- description:
- - The administrator's login name of a server. Can only be specified when the server is being created (and is required for creation).
- admin_password:
- description:
- - The password of the administrator login.
- create_mode:
- description:
- - Create mode of MySQL Server.
- default: Default
- state:
- description:
- - Assert the state of the MySQL Server. Use C(present) to create or update a server and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Create (or update) MySQL Server
- azure_rm_mysqlserver:
- resource_group: myResourceGroup
- name: testserver
- sku:
- name: B_Gen5_1
- tier: Basic
- location: eastus
- storage_mb: 1024
- enforce_ssl: True
- version: 5.6
- admin_username: cloudsa
- admin_password: password
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/mysqlsrv1b6dd89593
-version:
- description:
- - Server version. Possible values include C(5.6), C(5.7).
- returned: always
- type: str
- sample: 5.6
-state:
- description:
- - A state of a server that is visible to user. Possible values include C(Ready), C(Dropping), C(Disabled).
- returned: always
- type: str
- sample: Ready
-fully_qualified_domain_name:
- description:
- - The fully qualified domain name of a server.
- returned: always
- type: str
- sample: mysqlsrv1b6dd89593.mysql.database.azure.com
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from azure.mgmt.rdbms.mysql import MySQLManagementClient
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMMySqlServers(AzureRMModuleBase):
- """Configuration class for an Azure RM MySQL Server resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- sku=dict(
- type='dict'
- ),
- location=dict(
- type='str'
- ),
- storage_mb=dict(
- type='int'
- ),
- version=dict(
- type='str',
- choices=['5.6', '5.7']
- ),
- enforce_ssl=dict(
- type='bool',
- default=False
- ),
- create_mode=dict(
- type='str',
- default='Default'
- ),
- admin_username=dict(
- type='str'
- ),
- admin_password=dict(
- type='str',
- no_log=True
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.name = None
- self.parameters = dict()
- self.tags = None
-
- self.results = dict(changed=False)
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMMySqlServers, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- if key == "sku":
- ev = kwargs[key]
- if 'tier' in ev:
- if ev['tier'] == 'basic':
- ev['tier'] = 'Basic'
- elif ev['tier'] == 'standard':
- ev['tier'] = 'Standard'
- self.parameters["sku"] = ev
- elif key == "location":
- self.parameters["location"] = kwargs[key]
- elif key == "storage_mb":
- self.parameters.setdefault("properties", {}).setdefault("storage_profile", {})["storage_mb"] = kwargs[key]
- elif key == "version":
- self.parameters.setdefault("properties", {})["version"] = kwargs[key]
- elif key == "enforce_ssl":
- self.parameters.setdefault("properties", {})["ssl_enforcement"] = 'Enabled' if kwargs[key] else 'Disabled'
- elif key == "create_mode":
- self.parameters.setdefault("properties", {})["create_mode"] = kwargs[key]
- elif key == "admin_username":
- self.parameters.setdefault("properties", {})["administrator_login"] = kwargs[key]
- elif key == "admin_password":
- self.parameters.setdefault("properties", {})["administrator_login_password"] = kwargs[key]
-
- old_response = None
- response = None
-
- resource_group = self.get_resource_group(self.resource_group)
-
- if "location" not in self.parameters:
- self.parameters["location"] = resource_group.location
-
- old_response = self.get_mysqlserver()
-
- if not old_response:
- self.log("MySQL Server instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("MySQL Server instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.log("Need to check if MySQL Server instance has to be deleted or may be updated")
- update_tags, newtags = self.update_tags(old_response.get('tags', {}))
- if update_tags:
- self.tags = newtags
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the MySQL Server instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_mysqlserver()
-
- if not old_response:
- self.results['changed'] = True
- else:
- self.results['changed'] = old_response.__ne__(response)
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("MySQL Server instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_mysqlserver()
- # make sure the instance is actually deleted; for some Azure resources the instance hangs around
- # for some time after deletion -- this should really be fixed in Azure
- while self.get_mysqlserver():
- time.sleep(20)
- else:
- self.log("MySQL Server instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
- self.results["version"] = response["version"]
- self.results["state"] = response["user_visible_state"]
- self.results["fully_qualified_domain_name"] = response["fully_qualified_domain_name"]
-
- return self.results
-
- def create_update_mysqlserver(self):
- '''
- Creates or updates MySQL Server with the specified configuration.
-
- :return: deserialized MySQL Server instance state dictionary
- '''
- self.log("Creating / Updating the MySQL Server instance {0}".format(self.name))
-
- try:
- self.parameters['tags'] = self.tags
- if self.to_do == Actions.Create:
- response = self.mysql_client.servers.create(resource_group_name=self.resource_group,
- server_name=self.name,
- parameters=self.parameters)
- else:
- # structure of parameters for update must be changed
- self.parameters.update(self.parameters.pop("properties", {}))
- response = self.mysql_client.servers.update(resource_group_name=self.resource_group,
- server_name=self.name,
- parameters=self.parameters)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the MySQL Server instance.')
- self.fail("Error creating the MySQL Server instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_mysqlserver(self):
- '''
- Deletes specified MySQL Server instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the MySQL Server instance {0}".format(self.name))
- try:
- response = self.mysql_client.servers.delete(resource_group_name=self.resource_group,
- server_name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the MySQL Server instance.')
- self.fail("Error deleting the MySQL Server instance: {0}".format(str(e)))
-
- return True
-
- def get_mysqlserver(self):
- '''
- Gets the properties of the specified MySQL Server.
-
- :return: deserialized MySQL Server instance state dictionary
- '''
- self.log("Checking if the MySQL Server instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mysql_client.servers.get(resource_group_name=self.resource_group,
- server_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("MySQL Server instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the MySQL Server instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMMySqlServers()
-
-
-if __name__ == '__main__':
- main()
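The server module itself also moved with the collection. A minimal task sketch of the equivalent call, mirroring the EXAMPLES block of the deleted file (the azure.azcollection module name is assumed; resource names and the password variable are placeholders):

# Sketch only: create a Basic-tier MySQL 5.6 server with SSL enforcement, as in the deleted EXAMPLES.
- name: Create (or update) a MySQL server
  azure.azcollection.azure_rm_mysqlserver:
    resource_group: myResourceGroup
    name: testserver
    sku:
      name: B_Gen5_1
      tier: Basic
    location: eastus
    storage_mb: 1024
    enforce_ssl: true
    version: 5.6
    admin_username: cloudsa
    admin_password: "{{ mysql_admin_password }}"   # placeholder variable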
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_mysqlserver_info.py b/lib/ansible/modules/cloud/azure/azure_rm_mysqlserver_info.py
deleted file mode 100644
index 3cae238715..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_mysqlserver_info.py
+++ /dev/null
@@ -1,262 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_mysqlserver_info
-version_added: "2.9"
-short_description: Get Azure MySQL Server facts
-description:
- - Get facts of MySQL Server.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- type: str
- name:
- description:
- - The name of the server.
- type: str
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- type: list
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of MySQL Server
- azure_rm_mysqlserver_info:
- resource_group: myResourceGroup
- name: server_name
-
- - name: List instances of MySQL Server
- azure_rm_mysqlserver_info:
- resource_group: myResourceGroup
-'''
-
-RETURN = '''
-servers:
- description:
- - A list of dictionaries containing facts for MySQL servers.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforMySQL/servers/myabdud1223
- resource_group:
- description:
- - Resource group name.
- returned: always
- type: str
- sample: myResourceGroup
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: myabdud1223
- location:
- description:
- - The location the resource resides in.
- returned: always
- type: str
- sample: eastus
- sku:
- description:
- - The SKU of the server.
- returned: always
- type: complex
- contains:
- name:
- description:
- - The name of the SKU.
- returned: always
- type: str
- sample: GP_Gen4_2
- tier:
- description:
- - The tier of the particular SKU.
- returned: always
- type: str
- sample: GeneralPurpose
- capacity:
- description:
- - The scale capacity.
- returned: always
- type: int
- sample: 2
- storage_mb:
- description:
- - The maximum storage allowed for a server.
- returned: always
- type: int
- sample: 128000
- enforce_ssl:
- description:
- - Enable SSL enforcement.
- returned: always
- type: bool
- sample: False
- admin_username:
- description:
- - The administrator's login name of a server.
- returned: always
- type: str
- sample: serveradmin
- version:
- description:
- - Server version.
- returned: always
- type: str
- sample: "9.6"
- user_visible_state:
- description:
- - A state of a server that is visible to user.
- returned: always
- type: str
- sample: Ready
- fully_qualified_domain_name:
- description:
- - The fully qualified domain name of a server.
- returned: always
- type: str
- sample: myabdud1223.mysql.database.azure.com
- tags:
- description:
- - Tags assigned to the resource. Dictionary of string:string pairs.
- type: dict
- sample: { tag1: abc }
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.rdbms.mysql import MySQLManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMMySqlServerInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.resource_group = None
- self.name = None
- self.tags = None
- super(AzureRMMySqlServerInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_mysqlserver_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_mysqlserver_facts' module has been renamed to 'azure_rm_mysqlserver_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if (self.resource_group is not None and
- self.name is not None):
- self.results['servers'] = self.get()
- elif (self.resource_group is not None):
- self.results['servers'] = self.list_by_resource_group()
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mysql_client.servers.get(resource_group_name=self.resource_group,
- server_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for MySQL Server.')
-
- if response and self.has_tags(response.tags, self.tags):
- results.append(self.format_item(response))
-
- return results
-
- def list_by_resource_group(self):
- response = None
- results = []
- try:
- response = self.mysql_client.servers.list_by_resource_group(resource_group_name=self.resource_group)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for MySQL Servers.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_item(item))
-
- return results
-
- def format_item(self, item):
- d = item.as_dict()
- d = {
- 'id': d['id'],
- 'resource_group': self.resource_group,
- 'name': d['name'],
- 'sku': d['sku'],
- 'location': d['location'],
- 'storage_mb': d['storage_profile']['storage_mb'],
- 'version': d['version'],
- 'enforce_ssl': (d['ssl_enforcement'] == 'Enabled'),
- 'admin_username': d['administrator_login'],
- 'user_visible_state': d['user_visible_state'],
- 'fully_qualified_domain_name': d['fully_qualified_domain_name'],
- 'tags': d.get('tags')
- }
-
- return d
-
-
-def main():
- AzureRMMySqlServerInfo()
-
-
-if __name__ == '__main__':
- main()
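The matching facts module is covered by the collection as well; a short sketch of listing servers and reading the documented servers return key (collection name assumed, tag filter is a placeholder):

# Sketch only: list MySQL servers in a resource group, optionally filtered by tag.
- name: List MySQL servers
  azure.azcollection.azure_rm_mysqlserver_info:
    resource_group: myResourceGroup
    tags:
      - env:prod          # placeholder tag filter
  register: mysql_servers

- name: Show the fully qualified domain names
  debug:
    msg: "{{ mysql_servers.servers | map(attribute='fully_qualified_domain_name') | list }}"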
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py b/lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py
deleted file mode 100644
index 64a09776ce..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py
+++ /dev/null
@@ -1,877 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-# Yuwei ZHou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_networkinterface
-
-version_added: '2.1'
-
-short_description: Manage Azure network interfaces
-
-description:
- - Create, update or delete a network interface.
- - When creating a network interface, you must provide the name of an existing virtual network and the name of an existing subnet within the virtual network.
- - A default security group and public IP address will be created automatically.
- - Alternatively, you can provide the name of an existing security group and public IP address.
- - See the examples below for more details.
-
-options:
- resource_group:
- description:
- - Name of a resource group where the network interface exists or will be created.
- required: true
- name:
- description:
- - Name of the network interface.
- required: true
- state:
- description:
- - Assert the state of the network interface. Use C(present) to create or update an interface and
- C(absent) to delete an interface.
- default: present
- choices:
- - absent
- - present
- location:
- description:
- - Valid Azure location. Defaults to location of the resource group.
- virtual_network:
- description:
- - An existing virtual network with which the network interface will be associated. Required when creating a network interface.
- - It can be the virtual network's name.
- - Make sure the virtual network is in the same resource group as the NIC when you provide only the name.
- - It can be the virtual network's resource id.
- - It can be a dict which contains I(name) and I(resource_group) of the virtual network.
- aliases:
- - virtual_network_name
- required: true
- subnet_name:
- description:
- - Name of an existing subnet within the specified virtual network. Required when creating a network interface.
- - Use the C(virtual_network)'s resource group.
- aliases:
- - subnet
- required: true
- os_type:
- description:
- - Determines any rules to be added to a default security group.
- - When creating a network interface, if no security group name is provided, a default security group will be created.
- - If I(os_type=Windows), a rule allowing RDP access will be added.
- - If I(os_type=Linux), a rule allowing SSH access will be added.
- choices:
- - Windows
- - Linux
- default: Linux
- private_ip_address:
- description:
- - (Deprecated) Valid IPv4 address that falls within the specified subnet.
- - This option will be deprecated in 2.9, use I(ip_configurations) instead.
- private_ip_allocation_method:
- description:
- - (Deprecated) Whether or not the assigned IP address is permanent.
- - When creating a network interface, if you specify I(private_ip_allocation_method=Static), you must provide a value for I(private_ip_address).
- - You can update the allocation method to C(Static) after a dynamic private IP address has been assigned.
- - This option will be deprecated in 2.9, use I(ip_configurations) instead.
- default: Dynamic
- choices:
- - Dynamic
- - Static
- public_ip:
- description:
- - (Deprecated) When creating a network interface, if no public IP address name is provided, a default public IP address will be created.
- - Set to C(false) if you do not want a public IP address automatically created.
- - This option will be deprecated in 2.9, use I(ip_configurations) instead.
- type: bool
- default: 'yes'
- public_ip_address_name:
- description:
- - (Deprecated) Name of an existing public IP address object to associate with the network interface.
- - This option will be deprecated in 2.9, use I(ip_configurations) instead.
- aliases:
- - public_ip_address
- - public_ip_name
- public_ip_allocation_method:
- description:
- - (Deprecated) If I(public_ip_address_name) is not provided, a default public IP address will be created.
- - The allocation method determines whether or not the public IP address assigned to the network interface is permanent.
- - This option will be deprecated in 2.9, use I(ip_configurations) instead.
- choices:
- - Dynamic
- - Static
- default: Dynamic
- ip_configurations:
- description:
- - List of IP configurations. Each configuration object should include the
- fields I(private_ip_address), I(private_ip_allocation_method), I(public_ip_address_name), I(public_ip), I(public_ip_allocation_method), I(name).
- suboptions:
- name:
- description:
- - Name of the IP configuration.
- required: true
- private_ip_address:
- description:
- - Private IP address for the IP configuration.
- private_ip_allocation_method:
- description:
- - Private IP allocation method.
- choices:
- - Dynamic
- - Static
- default: Dynamic
- public_ip_address_name:
- description:
- - Name of the public IP address. Set to C(None) to disable the public IP address.
- aliases:
- - public_ip_address
- - public_ip_name
- public_ip_allocation_method:
- description:
- - Public IP allocation method.
- choices:
- - Dynamic
- - Static
- default: Dynamic
- load_balancer_backend_address_pools:
- description:
- - List of existing load-balancer backend address pools to associate with the network interface.
- - Can be written as a resource ID.
- - It can also be a dict of I(name) and I(load_balancer).
- version_added: '2.6'
- primary:
- description:
- - Whether the IP configuration is the primary one in the list.
- type: bool
- default: 'no'
- application_security_groups:
- description:
- - List of application security groups in which the IP configuration is included.
- - Each element of the list can be a resource ID of an application security group, or a dict of I(resource_group) and I(name).
- version_added: '2.8'
- version_added: '2.5'
- enable_accelerated_networking:
- description:
- - Whether the network interface should be created with the accelerated networking feature or not.
- type: bool
- version_added: '2.7'
- default: False
- create_with_security_group:
- description:
- - Whether a security group should be created with the NIC.
- - If this flag is set to C(True) and no I(security_group) is set, a default security group will be created.
- type: bool
- version_added: '2.6'
- default: True
- security_group:
- description:
- - An existing security group with which to associate the network interface.
- - If not provided, a default security group will be created when I(create_with_security_group=true).
- - It can be the name of the security group.
- - Make sure the security group is in the same resource group as the NIC when you provide only its name.
- - It can be the resource ID.
- - It can be a dict containing the security group's I(name) and I(resource_group).
- aliases:
- - security_group_name
- open_ports:
- description:
- - When a default security group is created for a Linux host, a rule will be added allowing inbound TCP
- connections to the default SSH port C(22), and for a Windows host rules will be added allowing inbound
- access to RDP ports C(3389) and C(5986). Override the default ports by providing a list of open ports.
- enable_ip_forwarding:
- description:
- - Whether to enable IP forwarding.
- aliases:
- - ip_forwarding
- type: bool
- default: False
- version_added: '2.7'
- dns_servers:
- description:
- - Which DNS servers the NIC should look up.
- - List of IP addresses.
- type: list
- version_added: '2.7'
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Chris Houseknecht (@chouseknecht)
- - Matt Davis (@nitzmahone)
- - Yuwei Zhou (@yuwzho)
-'''
-
-EXAMPLES = '''
- - name: Create a network interface with minimal parameters
- azure_rm_networkinterface:
- name: nic001
- resource_group: myResourceGroup
- virtual_network: vnet001
- subnet_name: subnet001
- ip_configurations:
- - name: ipconfig1
- public_ip_address_name: publicip001
- primary: True
-
- - name: Create a network interface with private IP address only (no Public IP)
- azure_rm_networkinterface:
- name: nic001
- resource_group: myResourceGroup
- virtual_network: vnet001
- subnet_name: subnet001
- create_with_security_group: False
- ip_configurations:
- - name: ipconfig1
- primary: True
-
- - name: Create a network interface for use in a Windows host (opens RDP port) with custom RDP port
- azure_rm_networkinterface:
- name: nic002
- resource_group: myResourceGroup
- virtual_network: vnet001
- subnet_name: subnet001
- os_type: Windows
- open_ports:
- - 3399
- security_group: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurit
- yGroups/nsg001"
- ip_configurations:
- - name: ipconfig1
- public_ip_address_name: publicip001
- primary: True
-
- - name: Create a network interface using existing security group and public IP
- azure_rm_networkinterface:
- name: nic003
- resource_group: myResourceGroup
- virtual_network: vnet001
- subnet_name: subnet001
- security_group: secgroup001
- ip_configurations:
- - name: ipconfig1
- public_ip_address_name: publicip001
- primary: True
-
- - name: Create a network with multiple ip configurations
- azure_rm_networkinterface:
- name: nic004
- resource_group: myResourceGroup
- subnet_name: subnet001
- virtual_network: vnet001
- security_group:
- name: testnic002
- resource_group: Testing1
- ip_configurations:
- - name: ipconfig1
- public_ip_address_name: publicip001
- primary: True
- - name: ipconfig2
- load_balancer_backend_address_pools:
- - "{{ loadbalancer001.state.backend_address_pools[0].id }}"
- - name: backendaddrpool1
- load_balancer: loadbalancer001
-
- - name: Create a network interface in accelerated networking mode
- azure_rm_networkinterface:
- name: nic005
- resource_group: myResourceGroup
- virtual_network_name: vnet001
- subnet_name: subnet001
- enable_accelerated_networking: True
-
- - name: Create a network interface with IP forwarding
- azure_rm_networkinterface:
- name: nic001
- resource_group: myResourceGroup
- virtual_network: vnet001
- subnet_name: subnet001
- ip_forwarding: True
- ip_configurations:
- - name: ipconfig1
- public_ip_address_name: publicip001
- primary: True
-
- - name: Create a network interface with dns servers
- azure_rm_networkinterface:
- name: nic009
- resource_group: myResourceGroup
- virtual_network: vnet001
- subnet_name: subnet001
- dns_servers:
- - 8.8.8.8
-
- - name: Delete network interface
- azure_rm_networkinterface:
- resource_group: myResourceGroup
- name: nic003
- state: absent
-'''
-
-RETURN = '''
-state:
- description:
- - The current state of the network interface.
- returned: always
- type: complex
- contains:
- dns_servers:
- description:
- - Which DNS servers the NIC should look up.
- - List of IP addresses.
- type: list
- sample: ['8.9.10.11', '7.8.9.10']
- dns_settings:
- description:
- - The DNS settings in network interface.
- type: dict
- sample: {
- "applied_dns_servers": [],
- "dns_servers": [
- "8.9.10.11",
- "7.8.9.10"
- ],
- "internal_dns_name_label": null,
- "internal_fqdn": null
- }
- enable_ip_forwarding:
- description:
- - Whether to enable IP forwarding.
- type: bool
- sample: true
- etag:
- description:
- - A unique read-only string that changes whenever the resource is updated.
- type: str
- sample: 'W/"be115a43-2148-4545-a324-f33ad444c926"'
- id:
- description:
- - Id of the network interface.
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkInterfaces/nic003"
- enable_accelerated_networking:
- description:
- - Whether the network interface should be created with the accelerated networking feature or not.
- type: bool
- sample: true
- ip_configurations:
- description:
- - List of IP configurations.
- type: complex
- contains:
- name:
- description:
- - Name of the IP configuration.
- type: str
- sample: default
- load_balancer_backend_address_pools:
- description:
- - List of existing load-balancer backend address pools to associate with the network interface.
- type: list
- private_ip_address:
- description:
- - Private IP address for the IP configuration.
- type: str
- sample: "10.1.0.10"
- private_ip_allocation_method:
- description:
- - Private IP allocation method.
- type: str
- sample: "Static"
- public_ip_address:
- description:
- - The public IP address reference. C(null) if no public IP address is assigned.
- type: dict
- sample: {
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/publicIPAddresse
- s/publicip001",
- "name": "publicip001"
- }
- subnet:
- description:
- - The reference of the subnet resource.
- type: dict
- sample: {
- "id": "/subscriptions/xxxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/
- myresourcegroup/providers/Microsoft.Network/virtualNetworks/tnb57dc95318/subnets/tnb57dc95318",
- "name": "tnb57dc95318",
- "resource_group": "myresourcegroup",
- "virtual_network_name": "tnb57dc95318"
- }
- location:
- description:
- - The network interface resource location.
- type: str
- sample: eastus
- mac_address:
- description:
- - The MAC address of the network interface.
- type: str
- name:
- description:
- - Name of the network interface.
- type: str
- sample: nic003
- network_security_group:
- description:
- - The reference of the network security group resource.
- type: dict
- sample: {
- "id": "/subscriptions//xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/
- myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/nsg001",
- "name": "nsg001"
- }
- primary:
- description:
- - Whether this is the primary network interface on a virtual machine.
- type: bool
- sample: true
- provisioning_state:
- description:
- - The provisioning state of the public IP resource.
- type: str
- sample: Succeeded
- tags:
- description:
- - Tags of the network interface.
- type: dict
- sample: { 'key': 'value' }
- type:
- description:
- - Type of the resource.
- type: str
- sample: "Microsoft.Network/networkInterfaces"
-'''
-
-try:
- from msrestazure.tools import parse_resource_id, resource_id, is_valid_resource_id
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict, normalize_location_name, format_resource_id
-from ansible.module_utils._text import to_native
-
-
-def subnet_to_dict(subnet):
- dic = azure_id_to_dict(subnet.id)
- return dict(
- id=subnet.id,
- virtual_network_name=dic.get('virtualNetworks'),
- resource_group=dic.get('resourceGroups'),
- name=dic.get('subnets')
- )
-
-
-def nic_to_dict(nic):
- ip_configurations = [
- dict(
- name=config.name,
- private_ip_address=config.private_ip_address,
- private_ip_allocation_method=config.private_ip_allocation_method,
- subnet=subnet_to_dict(config.subnet),
- primary=config.primary,
- load_balancer_backend_address_pools=([item.id for item in config.load_balancer_backend_address_pools]
- if config.load_balancer_backend_address_pools else None),
- public_ip_address=dict(
- id=config.public_ip_address.id,
- name=azure_id_to_dict(config.public_ip_address.id).get('publicIPAddresses'),
- public_ip_allocation_method=config.public_ip_address.public_ip_allocation_method
- ) if config.public_ip_address else None,
- application_security_groups=([asg.id for asg in config.application_security_groups]
- if config.application_security_groups else None)
- ) for config in nic.ip_configurations
- ]
- return dict(
- id=nic.id,
- name=nic.name,
- type=nic.type,
- location=nic.location,
- tags=nic.tags,
- network_security_group=dict(
- id=nic.network_security_group.id,
- name=azure_id_to_dict(nic.network_security_group.id).get('networkSecurityGroups')
- ) if nic.network_security_group else None,
- dns_settings=dict(
- dns_servers=nic.dns_settings.dns_servers,
- applied_dns_servers=nic.dns_settings.applied_dns_servers,
- internal_dns_name_label=nic.dns_settings.internal_dns_name_label,
- internal_fqdn=nic.dns_settings.internal_fqdn
- ),
- ip_configurations=ip_configurations,
- ip_configuration=ip_configurations[0] if len(ip_configurations) == 1 else None, # kept for backward compatibility
- mac_address=nic.mac_address,
- enable_ip_forwarding=nic.enable_ip_forwarding,
- provisioning_state=nic.provisioning_state,
- etag=nic.etag,
- enable_accelerated_networking=nic.enable_accelerated_networking,
- dns_servers=nic.dns_settings.dns_servers,
- )
-
-
-ip_configuration_spec = dict(
- name=dict(type='str', required=True),
- private_ip_address=dict(type='str'),
- private_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'),
- public_ip_address_name=dict(type='str', aliases=['public_ip_address', 'public_ip_name']),
- public_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'),
- load_balancer_backend_address_pools=dict(type='list'),
- primary=dict(type='bool', default=False),
- application_security_groups=dict(type='list', elements='raw')
-)
-
-
-class AzureRMNetworkInterface(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- location=dict(type='str'),
- enable_accelerated_networking=dict(type='bool', default=False),
- create_with_security_group=dict(type='bool', default=True),
- security_group=dict(type='raw', aliases=['security_group_name']),
- state=dict(default='present', choices=['present', 'absent']),
- private_ip_address=dict(type='str'),
- private_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'),
- public_ip_address_name=dict(type='str', aliases=['public_ip_address', 'public_ip_name']),
- public_ip=dict(type='bool', default=True),
- subnet_name=dict(type='str', aliases=['subnet']),
- virtual_network=dict(type='raw', aliases=['virtual_network_name']),
- public_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'),
- ip_configurations=dict(type='list', default=None, elements='dict', options=ip_configuration_spec),
- os_type=dict(type='str', choices=['Windows', 'Linux'], default='Linux'),
- open_ports=dict(type='list'),
- enable_ip_forwarding=dict(type='bool', aliases=['ip_forwarding'], default=False),
- dns_servers=dict(type='list'),
- )
-
- required_if = [
- ('state', 'present', ['subnet_name', 'virtual_network'])
- ]
-
- self.resource_group = None
- self.name = None
- self.location = None
- self.create_with_security_group = None
- self.enable_accelerated_networking = None
- self.security_group = None
- self.private_ip_address = None
- self.private_ip_allocation_method = None
- self.public_ip_address_name = None
- self.public_ip = None
- self.subnet_name = None
- self.virtual_network = None
- self.public_ip_allocation_method = None
- self.state = None
- self.tags = None
- self.os_type = None
- self.open_ports = None
- self.enable_ip_forwarding = None
- self.ip_configurations = None
- self.dns_servers = None
-
- self.results = dict(
- changed=False,
- state=dict(),
- )
-
- super(AzureRMNetworkInterface, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- required_if=required_if)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- results = None
- changed = False
- nic = None
- nsg = None
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- # Set default location
- self.location = resource_group.location
- self.location = normalize_location_name(self.location)
-
- # parse the virtual network resource group and name
- self.virtual_network = self.parse_resource_to_dict(self.virtual_network)
-
- # if the security group name is not set, default to the NIC name
- self.security_group = self.parse_resource_to_dict(self.security_group or self.name)
-
- # if application security groups set, convert to resource id format
- if self.ip_configurations:
- for config in self.ip_configurations:
- if config.get('application_security_groups'):
- asgs = []
- for asg in config['application_security_groups']:
- asg_resource_id = asg
- if isinstance(asg, str) and (not is_valid_resource_id(asg)):
- asg = self.parse_resource_to_dict(asg)
- if isinstance(asg, dict):
- asg_resource_id = format_resource_id(val=asg['name'],
- subscription_id=self.subscription_id,
- namespace='Microsoft.Network',
- types='applicationSecurityGroups',
- resource_group=asg['resource_group'])
- asgs.append(asg_resource_id)
- if len(asgs) > 0:
- config['application_security_groups'] = asgs
-
- if self.state == 'present' and not self.ip_configurations:
- # construct the ip_configurations array for backward compatibility
- self.deprecate('Setting the flattened ip_configuration options is deprecated and will be removed.'
- ' Use the ip_configurations list to define the IP configuration instead.', version='2.9')
- self.ip_configurations = [
- dict(
- private_ip_address=self.private_ip_address,
- private_ip_allocation_method=self.private_ip_allocation_method,
- public_ip_address_name=self.public_ip_address_name if self.public_ip else None,
- public_ip_allocation_method=self.public_ip_allocation_method,
- name='default',
- primary=True
- )
- ]
-
- try:
- self.log('Fetching network interface {0}'.format(self.name))
- nic = self.network_client.network_interfaces.get(self.resource_group, self.name)
-
- self.log('Network interface {0} exists'.format(self.name))
- self.check_provisioning_state(nic, self.state)
- results = nic_to_dict(nic)
- self.log(results, pretty_print=True)
-
- nsg = None
- if self.state == 'present':
- # check for update
- update_tags, results['tags'] = self.update_tags(results['tags'])
- if update_tags:
- changed = True
-
- if self.create_with_security_group != bool(results.get('network_security_group')):
- self.log("CHANGED: add or remove network interface {0} network security group".format(self.name))
- changed = True
-
- if self.enable_accelerated_networking != bool(results.get('enable_accelerated_networking')):
- self.log("CHANGED: Accelerated Networking set to {0} (previously {1})".format(
- self.enable_accelerated_networking,
- results.get('enable_accelerated_networking')))
- changed = True
-
- if self.enable_ip_forwarding != bool(results.get('enable_ip_forwarding')):
- self.log("CHANGED: IP forwarding set to {0} (previously {1})".format(
- self.enable_ip_forwarding,
- results.get('enable_ip_forwarding')))
- changed = True
-
- # ensure that dns_servers values are list-like before comparing
- dns_servers_res = results.get('dns_settings').get('dns_servers')
- _dns_servers_set = sorted(self.dns_servers) if isinstance(self.dns_servers, list) else list()
- _dns_servers_res = sorted(dns_servers_res) if isinstance(dns_servers_res, list) else list()
- if _dns_servers_set != _dns_servers_res:
- self.log("CHANGED: DNS servers set to {0} (previously {1})".format(
- ", ".join(_dns_servers_set),
- ", ".join(_dns_servers_res)))
- changed = True
-
- if not changed:
- nsg = self.get_security_group(self.security_group['resource_group'], self.security_group['name'])
- if nsg and results.get('network_security_group') and results['network_security_group'].get('id') != nsg.id:
- self.log("CHANGED: network interface {0} network security group".format(self.name))
- changed = True
-
- if results['ip_configurations'][0]['subnet']['virtual_network_name'] != self.virtual_network['name']:
- self.log("CHANGED: network interface {0} virtual network name".format(self.name))
- changed = True
-
- if results['ip_configurations'][0]['subnet']['resource_group'] != self.virtual_network['resource_group']:
- self.log("CHANGED: network interface {0} virtual network resource group".format(self.name))
- changed = True
-
- if results['ip_configurations'][0]['subnet']['name'] != self.subnet_name:
- self.log("CHANGED: network interface {0} subnet name".format(self.name))
- changed = True
-
- # check whether the ip_configuration has changed
- # construct two sets with the same structure and then compare
- # each set entry should contain:
- # name, private_ip_address, public_ip_address_name, private_ip_allocation_method, subnet_name
- ip_configuration_result = self.construct_ip_configuration_set(results['ip_configurations'])
- ip_configuration_request = self.construct_ip_configuration_set(self.ip_configurations)
- if ip_configuration_result != ip_configuration_request:
- self.log("CHANGED: network interface {0} ip configurations".format(self.name))
- changed = True
-
- elif self.state == 'absent':
- self.log("CHANGED: network interface {0} exists but requested state is 'absent'".format(self.name))
- changed = True
- except CloudError:
- self.log('Network interface {0} does not exist'.format(self.name))
- if self.state == 'present':
- self.log("CHANGED: network interface {0} does not exist but requested state is 'present'".format(self.name))
- changed = True
-
- self.results['changed'] = changed
- self.results['state'] = results
-
- if self.check_mode:
- return self.results
-
- if changed:
- if self.state == 'present':
- subnet = self.network_models.SubResource(
- id='/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/virtualNetworks/{2}/subnets/{3}'.format(
- self.virtual_network['subscription_id'],
- self.virtual_network['resource_group'],
- self.virtual_network['name'],
- self.subnet_name))
-
- nic_ip_configurations = [
- self.network_models.NetworkInterfaceIPConfiguration(
- private_ip_allocation_method=ip_config.get('private_ip_allocation_method'),
- private_ip_address=ip_config.get('private_ip_address'),
- name=ip_config.get('name'),
- subnet=subnet,
- public_ip_address=self.get_or_create_public_ip_address(ip_config),
- load_balancer_backend_address_pools=([self.network_models.BackendAddressPool(id=self.backend_addr_pool_id(bap_id))
- for bap_id in ip_config.get('load_balancer_backend_address_pools')]
- if ip_config.get('load_balancer_backend_address_pools') else None),
- primary=ip_config.get('primary'),
- application_security_groups=([self.network_models.ApplicationSecurityGroup(id=asg_id)
- for asg_id in ip_config.get('application_security_groups')]
- if ip_config.get('application_security_groups') else None)
- ) for ip_config in self.ip_configurations
- ]
-
- nsg = self.create_default_securitygroup(self.security_group['resource_group'],
- self.location,
- self.security_group['name'],
- self.os_type,
- self.open_ports) if self.create_with_security_group else None
-
- self.log('Creating or updating network interface {0}'.format(self.name))
- nic = self.network_models.NetworkInterface(
- id=results['id'] if results else None,
- location=self.location,
- tags=self.tags,
- ip_configurations=nic_ip_configurations,
- enable_accelerated_networking=self.enable_accelerated_networking,
- enable_ip_forwarding=self.enable_ip_forwarding,
- network_security_group=nsg
- )
- if self.dns_servers:
- dns_settings = self.network_models.NetworkInterfaceDnsSettings(
- dns_servers=self.dns_servers)
- nic.dns_settings = dns_settings
- self.results['state'] = self.create_or_update_nic(nic)
- elif self.state == 'absent':
- self.log('Deleting network interface {0}'.format(self.name))
- self.delete_nic()
- # Delete doesn't return anything. If we get this far, assume success
- self.results['state']['status'] = 'Deleted'
-
- return self.results
-
- def get_or_create_public_ip_address(self, ip_config):
- name = ip_config.get('public_ip_address_name')
-
- if not (self.public_ip and name):
- return None
-
- pip = self.get_public_ip_address(name)
- if not pip:
- params = self.network_models.PublicIPAddress(
- location=self.location,
- public_ip_allocation_method=ip_config.get('public_ip_allocation_method'),
- )
- try:
- poller = self.network_client.public_ip_addresses.create_or_update(self.resource_group, name, params)
- pip = self.get_poller_result(poller)
- except CloudError as exc:
- self.fail("Error creating {0} - {1}".format(name, str(exc)))
- return pip
-
- def create_or_update_nic(self, nic):
- try:
- poller = self.network_client.network_interfaces.create_or_update(self.resource_group, self.name, nic)
- new_nic = self.get_poller_result(poller)
- return nic_to_dict(new_nic)
- except Exception as exc:
- self.fail("Error creating or updating network interface {0} - {1}".format(self.name, str(exc)))
-
- def delete_nic(self):
- try:
- poller = self.network_client.network_interfaces.delete(self.resource_group, self.name)
- self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error deleting network interface {0} - {1}".format(self.name, str(exc)))
- return True
-
- def get_public_ip_address(self, name):
- self.log("Fetching public ip address {0}".format(name))
- try:
- return self.network_client.public_ip_addresses.get(self.resource_group, name)
- except Exception as exc:
- return None
-
- def get_security_group(self, resource_group, name):
- self.log("Fetching security group {0}".format(name))
- try:
- return self.network_client.network_security_groups.get(resource_group, name)
- except Exception as exc:
- return None
-
- def backend_addr_pool_id(self, val):
- if isinstance(val, dict):
- lb = val.get('load_balancer', None)
- name = val.get('name', None)
- if lb and name:
- return resource_id(subscription=self.subscription_id,
- resource_group=self.resource_group,
- namespace='Microsoft.Network',
- type='loadBalancers',
- name=lb,
- child_type_1='backendAddressPools',
- child_name_1=name)
- return val
-
- def construct_ip_configuration_set(self, raw):
- configurations = [str(dict(
- private_ip_allocation_method=to_native(item.get('private_ip_allocation_method')),
- public_ip_address_name=(to_native(item.get('public_ip_address').get('name'))
- if item.get('public_ip_address') else to_native(item.get('public_ip_address_name'))),
- primary=item.get('primary'),
- load_balancer_backend_address_pools=(set([to_native(self.backend_addr_pool_id(id))
- for id in item.get('load_balancer_backend_address_pools')])
- if item.get('load_balancer_backend_address_pools') else None),
- application_security_groups=(set([to_native(asg_id) for asg_id in item.get('application_security_groups')])
- if item.get('application_security_groups') else None),
- name=to_native(item.get('name'))
- )) for item in raw]
- return set(configurations)
-
-
-def main():
- AzureRMNetworkInterface()
-
-
-if __name__ == '__main__':
- main()
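The network interface module follows the same migration path; a minimal task sketch exercising the ip_configurations list documented in the deleted file (collection name assumed, resource names are placeholders):

# Sketch only: NIC with a single static private IP and no public IP or security group.
- name: Create a network interface
  azure.azcollection.azure_rm_networkinterface:
    resource_group: myResourceGroup
    name: nic001
    virtual_network: vnet001
    subnet_name: subnet001
    create_with_security_group: false
    ip_configurations:
      - name: ipconfig1
        private_ip_allocation_method: Static
        private_ip_address: 10.1.0.10
        primary: true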
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_networkinterface_info.py b/lib/ansible/modules/cloud/azure/azure_rm_networkinterface_info.py
deleted file mode 100644
index db7eded29c..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_networkinterface_info.py
+++ /dev/null
@@ -1,354 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_networkinterface_info
-
-version_added: "2.9"
-
-short_description: Get network interface facts
-
-description:
- - Get facts for a specific network interface or all network interfaces within a resource group.
-
-options:
- name:
- description:
- - Only show results for a specific network interface.
- resource_group:
- description:
- - Name of the resource group containing the network interface(s). Required when searching by name.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Chris Houseknecht (@chouseknecht)
- - Matt Davis (@nitzmahone)
-
-'''
-
-EXAMPLES = '''
- - name: Get facts for one network interface
- azure_rm_networkinterface_info:
- resource_group: myResourceGroup
- name: nic001
-
- - name: Get network interfaces within a resource group
- azure_rm_networkinterface_info:
- resource_group: myResourceGroup
-
- - name: Get network interfaces by tag
- azure_rm_networkinterface_info:
- resource_group: myResourceGroup
- tags:
- - testing
- - foo:bar
-'''
-
-RETURN = '''
-azure_networkinterfaces:
- description:
- - List of network interface dicts.
- returned: always
- type: list
- example: [{
- "dns_settings": {
- "applied_dns_servers": [],
- "dns_servers": [],
- "internal_dns_name_label": null,
- "internal_fqdn": null
- },
- "enable_ip_forwarding": false,
- "etag": 'W/"59726bfc-08c4-44ed-b900-f6a559876a9d"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkInterfaces/nic003",
- "ip_configuration": {
- "name": "default",
- "private_ip_address": "10.10.0.4",
- "private_ip_allocation_method": "Dynamic",
- "public_ip_address": {
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/publicIPAddresses/publicip001",
- "name": "publicip001"
- },
- "subnet": {
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet001/subnets/subnet001",
- "name": "subnet001",
- "virtual_network_name": "vnet001"
- }
- },
- "location": "westus",
- "mac_address": null,
- "name": "nic003",
- "network_security_group": {
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/secgroup001",
- "name": "secgroup001"
- },
- "primary": null,
- "provisioning_state": "Succeeded",
- "tags": {},
- "type": "Microsoft.Network/networkInterfaces"
- }]
-networkinterfaces:
- description:
-      - List of network interface dicts. Each dict contains parameters that can be passed to the M(azure_rm_networkinterface) module.
- type: list
- returned: always
- contains:
- id:
- description:
- - Id of the network interface.
- resource_group:
- description:
- - Name of a resource group where the network interface exists.
- name:
- description:
- - Name of the network interface.
- location:
- description:
- - Azure location.
- virtual_network:
- description:
- - An existing virtual network with which the network interface will be associated.
- - It is a dict which contains I(name) and I(resource_group) of the virtual network.
- subnet:
- description:
- - Name of an existing subnet within the specified virtual network.
- tags:
- description:
- - Tags of the network interface.
- ip_configurations:
- description:
-      - List of IP configurations, if the network interface has multiple configurations.
- contains:
- name:
- description:
- - Name of the IP configuration.
- private_ip_address:
- description:
- - Private IP address for the IP configuration.
- private_ip_allocation_method:
- description:
- - Private IP allocation method.
- public_ip_address:
- description:
-          - Name of the public IP address. None if the public IP address is disabled.
- public_ip_allocation_method:
- description:
- - Public IP allocation method.
- load_balancer_backend_address_pools:
- description:
- - List of existing load-balancer backend address pools to associate with the network interface.
- primary:
- description:
- - Whether the IP configuration is the primary one in the list.
- application_security_groups:
- description:
- - List of Application security groups.
- sample: /subscriptions/<subsid>/resourceGroups/<rg>/providers/Microsoft.Network/applicationSecurityGroups/myASG
- enable_accelerated_networking:
- description:
- - Specifies whether the network interface should be created with the accelerated networking feature or not.
- create_with_security_group:
- description:
-      - Specifies whether a default security group should be created with the NIC. Only applies when creating a new NIC.
- type: bool
- security_group:
- description:
- - A security group resource ID with which to associate the network interface.
- enable_ip_forwarding:
- description:
-      - Whether to enable IP forwarding.
- dns_servers:
- description:
-      - Which DNS servers the NIC should use for lookups.
- - List of IP addresses.
- mac_address:
- description:
- - The MAC address of the network interface.
- provisioning_state:
- description:
- - The provisioning state of the network interface.
- dns_settings:
- description:
- - The DNS settings in network interface.
- contains:
- dns_servers:
- description:
- - List of DNS servers IP addresses.
- applied_dns_servers:
- description:
- - If the VM that uses this NIC is part of an Availability Set, then this list will have the union of all DNS servers
- from all NICs that are part of the Availability Set. This property is what is configured on each of those VMs.
- internal_dns_name_label:
- description:
- - Relative DNS name for this NIC used for internal communications between VMs in the same virtual network.
- internal_fqdn:
- description:
- - Fully qualified DNS name supporting internal communications between VMs in the same virtual network.
-''' # NOQA
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.common import AzureMissingResourceHttpError, AzureHttpError
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict
-
-
-AZURE_OBJECT_CLASS = 'NetworkInterface'
-
-
-def nic_to_dict(nic):
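-    # Flatten an Azure NetworkInterface SDK object into the plain dict structure
-    # documented under RETURN for the 'networkinterfaces' list.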
- ip_configurations = [
- dict(
- name=config.name,
- private_ip_address=config.private_ip_address,
- private_ip_allocation_method=config.private_ip_allocation_method,
- primary=config.primary,
- load_balancer_backend_address_pools=([item.id for item in config.load_balancer_backend_address_pools]
- if config.load_balancer_backend_address_pools else None),
- public_ip_address=config.public_ip_address.id if config.public_ip_address else None,
- public_ip_allocation_method=config.public_ip_address.public_ip_allocation_method if config.public_ip_address else None,
- application_security_groups=([asg.id for asg in config.application_security_groups]
- if config.application_security_groups else None)
- ) for config in nic.ip_configurations
- ]
- config = nic.ip_configurations[0] if len(nic.ip_configurations) > 0 else None
- subnet_dict = azure_id_to_dict(config.subnet.id) if config and config.subnet else None
- subnet = subnet_dict.get('subnets') if subnet_dict else None
- virtual_network = dict(
- resource_group=subnet_dict.get('resourceGroups'),
- name=subnet_dict.get('virtualNetworks')) if subnet_dict else None
- return dict(
- id=nic.id,
- resource_group=azure_id_to_dict(nic.id).get('resourceGroups'),
- name=nic.name,
- subnet=subnet,
- virtual_network=virtual_network,
- location=nic.location,
- tags=nic.tags,
- security_group=nic.network_security_group.id if nic.network_security_group else None,
- dns_settings=dict(
- dns_servers=nic.dns_settings.dns_servers,
- applied_dns_servers=nic.dns_settings.applied_dns_servers,
- internal_dns_name_label=nic.dns_settings.internal_dns_name_label,
- internal_fqdn=nic.dns_settings.internal_fqdn
- ),
- ip_configurations=ip_configurations,
- mac_address=nic.mac_address,
- enable_ip_forwarding=nic.enable_ip_forwarding,
- provisioning_state=nic.provisioning_state,
- enable_accelerated_networking=nic.enable_accelerated_networking,
- dns_servers=nic.dns_settings.dns_servers,
- )
-
-
-class AzureRMNetworkInterfaceInfo(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str'),
- resource_group=dict(type='str'),
- tags=dict(type='list')
- )
-
- self.results = dict(
- changed=False,
- )
-
- self.name = None
- self.resource_group = None
- self.tags = None
-
- super(AzureRMNetworkInterfaceInfo, self).__init__(self.module_arg_spec,
- supports_tags=False,
- facts_module=True
- )
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_networkinterface_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_networkinterface_facts' module has been renamed to 'azure_rm_networkinterface_info'",
- version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name and not self.resource_group:
- self.fail("Parameter error: resource group required when filtering by name.")
-
- results = []
-
- if self.name:
- results = self.get_item()
- elif self.resource_group:
- results = self.list_resource_group()
- else:
- results = self.list_all()
-
- if is_old_facts:
- self.results['ansible_facts'] = {
- 'azure_networkinterfaces': self.serialize_nics(results)
- }
- self.results['networkinterfaces'] = self.to_dict_list(results)
- return self.results
-
- def get_item(self):
- self.log('Get properties for {0}'.format(self.name))
- item = None
- try:
- item = self.network_client.network_interfaces.get(self.resource_group, self.name)
- except Exception:
- pass
-
- return [item] if item and self.has_tags(item.tags, self.tags) else []
-
- def list_resource_group(self):
- self.log('List for resource group')
- try:
- response = self.network_client.network_interfaces.list(self.resource_group)
- return [item for item in response if self.has_tags(item.tags, self.tags)]
- except Exception as exc:
- self.fail("Error listing by resource group {0} - {1}".format(self.resource_group, str(exc)))
-
- def list_all(self):
- self.log('List all')
- try:
- response = self.network_client.network_interfaces.list_all()
- return [item for item in response if self.has_tags(item.tags, self.tags)]
- except Exception as exc:
- self.fail("Error listing all - {0}".format(str(exc)))
-
- def serialize_nics(self, raws):
- return [self.serialize_obj(item, AZURE_OBJECT_CLASS) for item in raws] if raws else []
-
- def to_dict_list(self, raws):
- return [nic_to_dict(item) for item in raws] if raws else []
-
-
-def main():
- AzureRMNetworkInterfaceInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_postgresqlconfiguration.py b/lib/ansible/modules/cloud/azure/azure_rm_postgresqlconfiguration.py
deleted file mode 100644
index 8cd9daa82d..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_postgresqlconfiguration.py
+++ /dev/null
@@ -1,240 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_postgresqlconfiguration
-version_added: "2.8"
-short_description: Manage Azure PostgreSQL Configuration
-description:
- - Update or reset Azure PostgreSQL Configuration setting.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource.
- required: True
- server_name:
- description:
- - The name of the server.
- required: True
- name:
- description:
- - Setting name.
- required: True
- value:
- description:
- - Setting value.
- state:
- description:
-      - Assert the state of the PostgreSQL setting. Use C(present) to update a setting, or C(absent) to reset it to its default value.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Update PostgreSQL Server setting
- azure_rm_postgresqlconfiguration:
- resource_group: myResourceGroup
- server_name: myServer
- name: deadlock_timeout
- value: 2000
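-
-  # Illustrative sketch: reset the same setting back to its server default
-  # by using state: absent (see the 'state' option above).
-  - name: Reset PostgreSQL Server setting to its default value
-    azure_rm_postgresqlconfiguration:
-      resource_group: myResourceGroup
-      server_name: myServer
-      name: deadlock_timeout
-      state: absent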
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforPostgreSQL/servers/myServer/confi
- gurations/event_scheduler"
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
-    from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMPostgreSqlConfigurations(AzureRMModuleBase):
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- value=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.server_name = None
- self.name = None
- self.value = None
-
- self.results = dict(changed=False)
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMPostgreSqlConfigurations, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
-
- old_response = None
- response = None
-
- old_response = self.get_configuration()
-
- if not old_response:
- self.log("Configuration instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("Configuration instance already exists")
- if self.state == 'absent' and old_response['source'] == 'user-override':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.log("Need to check if Configuration instance has to be deleted or may be updated")
- if self.value != old_response.get('value'):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the Configuration instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_configuration()
-
- self.results['changed'] = True
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("Configuration instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_configuration()
- else:
- self.log("Configuration instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
-
- return self.results
-
- def create_update_configuration(self):
- self.log("Creating / Updating the Configuration instance {0}".format(self.name))
-
- try:
- response = self.postgresql_client.configurations.create_or_update(resource_group_name=self.resource_group,
- server_name=self.server_name,
- configuration_name=self.name,
- value=self.value,
- source='user-override')
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the Configuration instance.')
- self.fail("Error creating the Configuration instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_configuration(self):
- self.log("Deleting the Configuration instance {0}".format(self.name))
- try:
- response = self.postgresql_client.configurations.create_or_update(resource_group_name=self.resource_group,
- server_name=self.server_name,
- configuration_name=self.name,
- source='system-default')
- except CloudError as e:
- self.log('Error attempting to delete the Configuration instance.')
- self.fail("Error deleting the Configuration instance: {0}".format(str(e)))
-
- return True
-
- def get_configuration(self):
- self.log("Checking if the Configuration instance {0} is present".format(self.name))
- found = False
- try:
- response = self.postgresql_client.configurations.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- configuration_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("Configuration instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the Configuration instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMPostgreSqlConfigurations()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_postgresqlconfiguration_info.py b/lib/ansible/modules/cloud/azure/azure_rm_postgresqlconfiguration_info.py
deleted file mode 100644
index 3b45737b55..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_postgresqlconfiguration_info.py
+++ /dev/null
@@ -1,217 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_postgresqlconfiguration_info
-version_added: "2.9"
-short_description: Get Azure PostgreSQL Configuration facts
-description:
- - Get facts of Azure PostgreSQL Configuration.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource.
- required: True
- type: str
- server_name:
- description:
- - The name of the server.
- required: True
- type: str
- name:
- description:
- - Setting name.
- type: str
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get specific setting of PostgreSQL configuration
- azure_rm_postgresqlconfiguration_info:
- resource_group: myResourceGroup
- server_name: testpostgresqlserver
- name: deadlock_timeout
-
- - name: Get all settings of PostgreSQL Configuration
- azure_rm_postgresqlconfiguration_info:
- resource_group: myResourceGroup
- server_name: testpostgresqlserver
-'''
-
-RETURN = '''
-settings:
- description:
-      - A list of dictionaries containing PostgreSQL Server settings.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Setting resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/testrg/providers/Microsoft.DBforPostgreSQL/servers/testpostgresqlser
- ver/configurations/deadlock_timeout"
- name:
- description:
- - Setting name.
- returned: always
- type: str
- sample: deadlock_timeout
- value:
- description:
- - Setting value.
- returned: always
- type: raw
- sample: 1000
- description:
- description:
- - Description of the configuration.
- returned: always
- type: str
- sample: Deadlock timeout.
- source:
- description:
- - Source of the configuration.
- returned: always
- type: str
- sample: system-default
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.azure_operation import AzureOperationPoller
- from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMPostgreSQLConfigurationInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.server_name = None
- self.name = None
- super(AzureRMPostgreSQLConfigurationInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_postgresqlconfiguration_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_postgresqlconfiguration_facts' module has been renamed to 'azure_rm_postgresqlconfiguration_info'",
- version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(PostgreSQLManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if self.name is not None:
- self.results['settings'] = self.get()
- else:
- self.results['settings'] = self.list_by_server()
- return self.results
-
- def get(self):
- '''
- Gets facts of the specified PostgreSQL Configuration.
-
-        :return: deserialized PostgreSQL Configuration instance state dictionary
- '''
- response = None
- results = []
- try:
- response = self.mgmt_client.configurations.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- configuration_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail('Could not get requested setting.')
-
- if response is not None:
- results.append(self.format_item(response))
-
- return results
-
- def list_by_server(self):
- '''
-        Gets facts of all PostgreSQL Configuration settings on the specified server.
-
-        :return: list of deserialized PostgreSQL Configuration instance state dictionaries
- '''
- response = None
- results = []
- try:
- response = self.mgmt_client.configurations.list_by_server(resource_group_name=self.resource_group,
- server_name=self.server_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail('Could not get settings for server.')
-
- if response is not None:
- for item in response:
- results.append(self.format_item(item))
-
- return results
-
- def format_item(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.resource_group,
- 'server_name': self.server_name,
- 'id': d['id'],
- 'name': d['name'],
- 'value': d['value'],
- 'description': d['description'],
- 'source': d['source']
- }
- return d
-
-
-def main():
- AzureRMPostgreSQLConfigurationInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_postgresqldatabase.py b/lib/ansible/modules/cloud/azure/azure_rm_postgresqldatabase.py
deleted file mode 100644
index 2d734807e9..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_postgresqldatabase.py
+++ /dev/null
@@ -1,303 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_postgresqldatabase
-version_added: "2.5"
-short_description: Manage PostgreSQL Database instance
-description:
- - Create, update and delete instance of PostgreSQL Database.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- server_name:
- description:
- - The name of the server.
- required: True
- name:
- description:
- - The name of the database.
- required: True
- charset:
- description:
- - The charset of the database. Check PostgreSQL documentation for possible values.
-      - This is only set on creation; use I(force_update) to recreate the database if the values don't match.
- collation:
- description:
- - The collation of the database. Check PostgreSQL documentation for possible values.
-      - This is only set on creation; use I(force_update) to recreate the database if the values don't match.
- force_update:
- description:
-      - When set to C(true), the existing PostgreSQL database will be deleted and recreated if any of the properties don't match what is set.
- - When set to C(false), no change will occur to the database even if any of the properties do not match.
- type: bool
- default: 'no'
- state:
- description:
- - Assert the state of the PostgreSQL database. Use C(present) to create or update a database and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Create (or update) PostgreSQL Database
- azure_rm_postgresqldatabase:
- resource_group: myResourceGroup
- server_name: testserver
- name: db1
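-
-  # Illustrative sketch: charset and collation are applied at creation time only
-  # (see the option notes above); the values shown here are examples.
-  - name: Create PostgreSQL Database with an explicit charset and collation
-    azure_rm_postgresqldatabase:
-      resource_group: myResourceGroup
-      server_name: testserver
-      name: db1
-      charset: UTF8
-      collation: English_United States.1252
-
-  # Illustrative sketch: remove the database again with state: absent.
-  - name: Delete PostgreSQL Database
-    azure_rm_postgresqldatabase:
-      resource_group: myResourceGroup
-      server_name: testserver
-      name: db1
-      state: absent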
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroups/providers/Microsoft.DBforPostgreSQL/servers/testserve
- r/databases/db1"
-name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: db1
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMPostgreSqlDatabases(AzureRMModuleBase):
- """Configuration class for an Azure RM PostgreSQL Database resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- charset=dict(
- type='str'
- ),
- collation=dict(
- type='str'
- ),
- force_update=dict(
- type='bool',
- default=False
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.server_name = None
- self.name = None
- self.force_update = None
- self.parameters = dict()
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMPostgreSqlDatabases, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- if key == "charset":
- self.parameters["charset"] = kwargs[key]
- elif key == "collation":
- self.parameters["collation"] = kwargs[key]
-
- old_response = None
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(PostgreSQLManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- resource_group = self.get_resource_group(self.resource_group)
-
- old_response = self.get_postgresqldatabase()
-
- if not old_response:
- self.log("PostgreSQL Database instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("PostgreSQL Database instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.log("Need to check if PostgreSQL Database instance has to be deleted or may be updated")
- if ('collation' in self.parameters) and (self.parameters['collation'] != old_response['collation']):
- self.to_do = Actions.Update
- if ('charset' in self.parameters) and (self.parameters['charset'] != old_response['charset']):
- self.to_do = Actions.Update
- if self.to_do == Actions.Update:
- if self.force_update:
- if not self.check_mode:
- self.delete_postgresqldatabase()
- else:
- self.fail("Database properties cannot be updated without setting 'force_update' option")
- self.to_do = Actions.NoAction
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the PostgreSQL Database instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_postgresqldatabase()
- self.results['changed'] = True
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("PostgreSQL Database instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_postgresqldatabase()
- # make sure instance is actually deleted, for some Azure resources, instance is hanging around
- # for some time after deletion -- this should be really fixed in Azure
- while self.get_postgresqldatabase():
- time.sleep(20)
- else:
- self.log("PostgreSQL Database instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
- self.results["name"] = response["name"]
-
- return self.results
-
- def create_update_postgresqldatabase(self):
- '''
- Creates or updates PostgreSQL Database with the specified configuration.
-
- :return: deserialized PostgreSQL Database instance state dictionary
- '''
- self.log("Creating / Updating the PostgreSQL Database instance {0}".format(self.name))
-
- try:
- response = self.mgmt_client.databases.create_or_update(resource_group_name=self.resource_group,
- server_name=self.server_name,
- database_name=self.name,
- parameters=self.parameters)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the PostgreSQL Database instance.')
- self.fail("Error creating the PostgreSQL Database instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_postgresqldatabase(self):
- '''
- Deletes specified PostgreSQL Database instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the PostgreSQL Database instance {0}".format(self.name))
- try:
- response = self.mgmt_client.databases.delete(resource_group_name=self.resource_group,
- server_name=self.server_name,
- database_name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the PostgreSQL Database instance.')
- self.fail("Error deleting the PostgreSQL Database instance: {0}".format(str(e)))
-
- return True
-
- def get_postgresqldatabase(self):
- '''
- Gets the properties of the specified PostgreSQL Database.
-
- :return: deserialized PostgreSQL Database instance state dictionary
- '''
- self.log("Checking if the PostgreSQL Database instance {0} is present".format(self.name))
- found = False
- try:
- response = self.mgmt_client.databases.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- database_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("PostgreSQL Database instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the PostgreSQL Database instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMPostgreSqlDatabases()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_postgresqldatabase_info.py b/lib/ansible/modules/cloud/azure/azure_rm_postgresqldatabase_info.py
deleted file mode 100644
index 115b8fa6f2..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_postgresqldatabase_info.py
+++ /dev/null
@@ -1,209 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_postgresqldatabase_info
-version_added: "2.9"
-short_description: Get Azure PostgreSQL Database facts
-description:
- - Get facts of PostgreSQL Database.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- type: str
- server_name:
- description:
- - The name of the server.
- required: True
- type: str
- name:
- description:
- - The name of the database.
- type: str
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of PostgreSQL Database
- azure_rm_postgresqldatabase_info:
- resource_group: myResourceGroup
- server_name: server_name
- name: database_name
-
- - name: List instances of PostgreSQL Database
- azure_rm_postgresqldatabase_info:
- resource_group: myResourceGroup
- server_name: server_name
-'''
-
-RETURN = '''
-databases:
- description:
-      - A list of dictionaries containing facts for each PostgreSQL Database.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforPostgreSQL/servers/testser
- ver/databases/db1"
- resource_group:
- description:
- - Resource group name.
- returned: always
- type: str
- sample: testrg
- server_name:
- description:
- - Server name.
- returned: always
- type: str
- sample: testserver
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: db1
- charset:
- description:
- - The charset of the database.
- returned: always
- type: str
- sample: UTF8
- collation:
- description:
- - The collation of the database.
- returned: always
- type: str
- sample: English_United States.1252
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMPostgreSqlDatabasesInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.resource_group = None
- self.server_name = None
- self.name = None
- super(AzureRMPostgreSqlDatabasesInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_postgresqldatabase_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_postgresqldatabase_facts' module has been renamed to 'azure_rm_postgresqldatabase_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if (self.resource_group is not None and
- self.server_name is not None and
- self.name is not None):
- self.results['databases'] = self.get()
- elif (self.resource_group is not None and
- self.server_name is not None):
- self.results['databases'] = self.list_by_server()
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.postgresql_client.databases.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- database_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Databases.')
-
- if response is not None:
- results.append(self.format_item(response))
-
- return results
-
- def list_by_server(self):
- response = None
- results = []
- try:
- response = self.postgresql_client.databases.list_by_server(resource_group_name=self.resource_group,
- server_name=self.server_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail("Error listing for server {0} - {1}".format(self.server_name, str(e)))
-
- if response is not None:
- for item in response:
- results.append(self.format_item(item))
-
- return results
-
- def format_item(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.resource_group,
- 'server_name': self.server_name,
- 'name': d['name'],
- 'charset': d['charset'],
- 'collation': d['collation']
- }
- return d
-
-
-def main():
- AzureRMPostgreSqlDatabasesInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_postgresqlfirewallrule.py b/lib/ansible/modules/cloud/azure/azure_rm_postgresqlfirewallrule.py
deleted file mode 100644
index 66642198d1..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_postgresqlfirewallrule.py
+++ /dev/null
@@ -1,275 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_postgresqlfirewallrule
-version_added: "2.8"
-short_description: Manage PostgreSQL firewall rule instance
-description:
- - Create, update and delete instance of PostgreSQL firewall rule.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- server_name:
- description:
- - The name of the server.
- required: True
- name:
- description:
- - The name of the PostgreSQL firewall rule.
- required: True
- start_ip_address:
- description:
- - The start IP address of the PostgreSQL firewall rule. Must be IPv4 format.
- end_ip_address:
- description:
- - The end IP address of the PostgreSQL firewall rule. Must be IPv4 format.
- state:
- description:
- - Assert the state of the PostgreSQL firewall rule. Use C(present) to create or update a PostgreSQL firewall rule and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Create (or update) PostgreSQL firewall rule
- azure_rm_postgresqlfirewallrule:
- resource_group: myResourceGroup
- server_name: testserver
- name: rule1
- start_ip_address: 10.0.0.16
- end_ip_address: 10.0.0.18
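-
-  # Illustrative sketch: remove the rule again with state: absent.
-  - name: Delete PostgreSQL firewall rule
-    azure_rm_postgresqlfirewallrule:
-      resource_group: myResourceGroup
-      server_name: testserver
-      name: rule1
-      state: absent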
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforPostgreSQL/servers/testserver
- /firewallRules/rule1"
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMPostgreSqlFirewallRules(AzureRMModuleBase):
- """Configuration class for an Azure RM PostgreSQL firewall rule resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- start_ip_address=dict(
- type='str'
- ),
- end_ip_address=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.server_name = None
- self.name = None
- self.start_ip_address = None
- self.end_ip_address = None
-
- self.results = dict(changed=False)
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMPostgreSqlFirewallRules, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
-
- old_response = None
- response = None
-
- resource_group = self.get_resource_group(self.resource_group)
-
- old_response = self.get_firewallrule()
-
- if not old_response:
- self.log("PostgreSQL firewall rule instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("PostgreSQL firewall rule instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.log("Need to check if PostgreSQL firewall rule instance has to be deleted or may be updated")
- if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']):
- self.to_do = Actions.Update
- if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the PostgreSQL firewall rule instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_firewallrule()
-
- if not old_response:
- self.results['changed'] = True
- else:
-                self.results['changed'] = (old_response != response)
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("PostgreSQL firewall rule instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_firewallrule()
- # make sure instance is actually deleted, for some Azure resources, instance is hanging around
- # for some time after deletion -- this should be really fixed in Azure
- while self.get_firewallrule():
- time.sleep(20)
- else:
- self.log("PostgreSQL firewall rule instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
-
- return self.results
-
- def create_update_firewallrule(self):
- '''
- Creates or updates PostgreSQL firewall rule with the specified configuration.
-
- :return: deserialized PostgreSQL firewall rule instance state dictionary
- '''
- self.log("Creating / Updating the PostgreSQL firewall rule instance {0}".format(self.name))
-
- try:
- response = self.postgresql_client.firewall_rules.create_or_update(resource_group_name=self.resource_group,
- server_name=self.server_name,
- firewall_rule_name=self.name,
- start_ip_address=self.start_ip_address,
- end_ip_address=self.end_ip_address)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the PostgreSQL firewall rule instance.')
- self.fail("Error creating the PostgreSQL firewall rule instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_firewallrule(self):
- '''
- Deletes specified PostgreSQL firewall rule instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the PostgreSQL firewall rule instance {0}".format(self.name))
- try:
- response = self.postgresql_client.firewall_rules.delete(resource_group_name=self.resource_group,
- server_name=self.server_name,
- firewall_rule_name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the PostgreSQL firewall rule instance.')
- self.fail("Error deleting the PostgreSQL firewall rule instance: {0}".format(str(e)))
-
- return True
-
- def get_firewallrule(self):
- '''
- Gets the properties of the specified PostgreSQL firewall rule.
-
- :return: deserialized PostgreSQL firewall rule instance state dictionary
- '''
- self.log("Checking if the PostgreSQL firewall rule instance {0} is present".format(self.name))
- found = False
- try:
- response = self.postgresql_client.firewall_rules.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- firewall_rule_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("PostgreSQL firewall rule instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the PostgreSQL firewall rule instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMPostgreSqlFirewallRules()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_postgresqlfirewallrule_info.py b/lib/ansible/modules/cloud/azure/azure_rm_postgresqlfirewallrule_info.py
deleted file mode 100644
index dae3140aaa..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_postgresqlfirewallrule_info.py
+++ /dev/null
@@ -1,205 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_postgresqlfirewallrule_info
-version_added: "2.9"
-short_description: Get Azure PostgreSQL Firewall Rule facts
-description:
- - Get facts of Azure PostgreSQL Firewall Rule.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- type: str
- server_name:
- description:
- - The name of the server.
- required: True
- type: str
- name:
- description:
- - The name of the server firewall rule.
- type: str
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of PostgreSQL Firewall Rule
- azure_rm_postgresqlfirewallrule_info:
- resource_group: myResourceGroup
- server_name: server_name
- name: firewall_rule_name
-
- - name: List instances of PostgreSQL Firewall Rule
- azure_rm_postgresqlfirewallrule_info:
- resource_group: myResourceGroup
- server_name: server_name
-'''
-
-RETURN = '''
-rules:
- description:
- - A list of dictionaries containing facts for PostgreSQL Firewall Rule.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/TestGroup/providers/Microsoft.DBforPostgreSQL/servers/testserver/fire
- wallRules/rule1"
- server_name:
- description:
- - The name of the server.
- returned: always
- type: str
- sample: testserver
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: rule1
- start_ip_address:
- description:
- - The start IP address of the PostgreSQL firewall rule.
- returned: always
- type: str
- sample: 10.0.0.16
- end_ip_address:
- description:
- - The end IP address of the PostgreSQL firewall rule.
- returned: always
- type: str
- sample: 10.0.0.18
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMPostgreSQLFirewallRulesInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.server_name = None
- self.name = None
- super(AzureRMPostgreSQLFirewallRulesInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_postgresqlfirewallrule_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_postgresqlfirewallrule_facts' module has been renamed to 'azure_rm_postgresqlfirewallrule_info'",
- version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(PostgreSQLManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if (self.name is not None):
- self.results['rules'] = self.get()
- else:
- self.results['rules'] = self.list_by_server()
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.firewall_rules.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- firewall_rule_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for FirewallRules.')
-
- if response is not None:
- results.append(self.format_item(response))
-
- return results
-
- def list_by_server(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.firewall_rules.list_by_server(resource_group_name=self.resource_group,
- server_name=self.server_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for FirewallRules.')
-
- if response is not None:
- for item in response:
- results.append(self.format_item(item))
-
- return results
-
- def format_item(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.resource_group,
- 'id': d['id'],
- 'server_name': self.server_name,
- 'name': d['name'],
- 'start_ip_address': d['start_ip_address'],
- 'end_ip_address': d['end_ip_address']
- }
- return d
-
-
-def main():
- AzureRMPostgreSQLFirewallRulesInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_postgresqlserver.py b/lib/ansible/modules/cloud/azure/azure_rm_postgresqlserver.py
deleted file mode 100644
index 768045202a..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_postgresqlserver.py
+++ /dev/null
@@ -1,387 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_postgresqlserver
-version_added: "2.5"
-short_description: Manage PostgreSQL Server instance
-description:
- - Create, update and delete instance of PostgreSQL Server.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- name:
- description:
- - The name of the server.
- required: True
- sku:
- description:
- - The SKU (pricing tier) of the server.
- suboptions:
- name:
- description:
- - The name of the SKU, typically, tier + family + cores, for example C(B_Gen4_1), C(GP_Gen5_8).
- tier:
- description:
- - The tier of the particular SKU, for example C(Basic).
- choices:
- - Basic
- - Standard
- capacity:
- description:
- - The scale up/out capacity, representing server's compute units.
- size:
- description:
- - The size code, to be interpreted by resource as appropriate.
- location:
- description:
- - Resource location. If not set, location from the resource group will be used as default.
- storage_mb:
- description:
- - The maximum storage allowed for a server.
- type: int
- version:
- description:
- - Server version.
- choices:
- - '9.5'
- - '9.6'
- - '10'
- - '11'
- enforce_ssl:
- description:
- - Enable SSL enforcement.
- type: bool
- default: False
- admin_username:
- description:
- - The administrator's login name of a server. Can only be specified when the server is being created (and is required for creation).
- admin_password:
- description:
- - The password of the administrator login.
- create_mode:
- description:
-      - Create mode of the PostgreSQL Server.
- default: Default
- state:
- description:
- - Assert the state of the PostgreSQL server. Use C(present) to create or update a server and C(absent) to delete it.
- default: present
- choices:
- - present
- - absent
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Create (or update) PostgreSQL Server
- azure_rm_postgresqlserver:
- resource_group: myResourceGroup
- name: testserver
- sku:
- name: B_Gen5_1
- tier: Basic
- location: eastus
- storage_mb: 1024
- enforce_ssl: True
- admin_username: cloudsa
- admin_password: password
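-
-  # Illustrative sketch: drop the server again with state: absent.
-  - name: Delete PostgreSQL Server
-    azure_rm_postgresqlserver:
-      resource_group: myResourceGroup
-      name: testserver
-      state: absent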
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
-    sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforPostgreSQL/servers/postgresqlsrv1b6dd89593
-version:
- description:
- - Server version. Possible values include C(9.5), C(9.6), C(10), C(11).
- returned: always
- type: str
- sample: 9.6
-state:
- description:
- - A state of a server that is visible to user. Possible values include C(Ready), C(Dropping), C(Disabled).
- returned: always
- type: str
- sample: Ready
-fully_qualified_domain_name:
- description:
- - The fully qualified domain name of a server.
- returned: always
- type: str
- sample: postgresqlsrv1b6dd89593.postgresql.database.azure.com
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMPostgreSqlServers(AzureRMModuleBase):
- """Configuration class for an Azure RM PostgreSQL Server resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- sku=dict(
- type='dict'
- ),
- location=dict(
- type='str'
- ),
- storage_mb=dict(
- type='int'
- ),
- version=dict(
- type='str',
- choices=['9.5', '9.6', '10', '11']
- ),
- enforce_ssl=dict(
- type='bool',
- default=False
- ),
- create_mode=dict(
- type='str',
- default='Default'
- ),
- admin_username=dict(
- type='str'
- ),
- admin_password=dict(
- type='str',
- no_log=True
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.name = None
- self.parameters = dict()
- self.tags = None
-
- self.results = dict(changed=False)
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMPostgreSqlServers, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
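-        # Map module arguments onto the API payload: 'sku' and 'location' stay at
-        # the top level, the remaining settings are collected under 'properties'
-        # (flattened again later for the update call).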
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- if key == "sku":
- ev = kwargs[key]
- if 'tier' in ev:
- if ev['tier'] == 'basic':
- ev['tier'] = 'Basic'
- elif ev['tier'] == 'standard':
- ev['tier'] = 'Standard'
- self.parameters["sku"] = ev
- elif key == "location":
- self.parameters["location"] = kwargs[key]
- elif key == "storage_mb":
- self.parameters.setdefault("properties", {}).setdefault("storage_profile", {})["storage_mb"] = kwargs[key]
- elif key == "version":
- self.parameters.setdefault("properties", {})["version"] = kwargs[key]
- elif key == "enforce_ssl":
- self.parameters.setdefault("properties", {})["ssl_enforcement"] = 'Enabled' if kwargs[key] else 'Disabled'
- elif key == "create_mode":
- self.parameters.setdefault("properties", {})["create_mode"] = kwargs[key]
- elif key == "admin_username":
- self.parameters.setdefault("properties", {})["administrator_login"] = kwargs[key]
- elif key == "admin_password":
- self.parameters.setdefault("properties", {})["administrator_login_password"] = kwargs[key]
-
- old_response = None
- response = None
-
- resource_group = self.get_resource_group(self.resource_group)
-
- if "location" not in self.parameters:
- self.parameters["location"] = resource_group.location
-
- old_response = self.get_postgresqlserver()
-
- if not old_response:
- self.log("PostgreSQL Server instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("PostgreSQL Server instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.log("Need to check if PostgreSQL Server instance has to be deleted or may be updated")
- update_tags, newtags = self.update_tags(old_response.get('tags', {}))
- if update_tags:
- self.tags = newtags
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the PostgreSQL Server instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_postgresqlserver()
-
- if not old_response:
- self.results['changed'] = True
- else:
- self.results['changed'] = (old_response != response)
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("PostgreSQL Server instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_postgresqlserver()
- # make sure the instance is actually deleted; for some Azure resources the instance keeps
- # hanging around for some time after deletion -- this should really be fixed in Azure
- while self.get_postgresqlserver():
- time.sleep(20)
- else:
- self.log("PostgreSQL Server instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
- self.results["version"] = response["version"]
- self.results["state"] = response["user_visible_state"]
- self.results["fully_qualified_domain_name"] = response["fully_qualified_domain_name"]
-
- return self.results
-
- def create_update_postgresqlserver(self):
- '''
- Creates or updates PostgreSQL Server with the specified configuration.
-
- :return: deserialized PostgreSQL Server instance state dictionary
- '''
- self.log("Creating / Updating the PostgreSQL Server instance {0}".format(self.name))
-
- try:
- self.parameters['tags'] = self.tags
- if self.to_do == Actions.Create:
- response = self.postgresql_client.servers.create(resource_group_name=self.resource_group,
- server_name=self.name,
- parameters=self.parameters)
- else:
- # structure of parameters for update must be changed
- self.parameters.update(self.parameters.pop("properties", {}))
- response = self.postgresql_client.servers.update(resource_group_name=self.resource_group,
- server_name=self.name,
- parameters=self.parameters)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the PostgreSQL Server instance.')
- self.fail("Error creating the PostgreSQL Server instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_postgresqlserver(self):
- '''
- Deletes specified PostgreSQL Server instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the PostgreSQL Server instance {0}".format(self.name))
- try:
- response = self.postgresql_client.servers.delete(resource_group_name=self.resource_group,
- server_name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the PostgreSQL Server instance.')
- self.fail("Error deleting the PostgreSQL Server instance: {0}".format(str(e)))
-
- return True
-
- def get_postgresqlserver(self):
- '''
- Gets the properties of the specified PostgreSQL Server.
-
- :return: deserialized PostgreSQL Server instance state dictionary
- '''
- self.log("Checking if the PostgreSQL Server instance {0} is present".format(self.name))
- found = False
- try:
- response = self.postgresql_client.servers.get(resource_group_name=self.resource_group,
- server_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("PostgreSQL Server instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the PostgreSQL Server instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMPostgreSqlServers()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_postgresqlserver_info.py b/lib/ansible/modules/cloud/azure/azure_rm_postgresqlserver_info.py
deleted file mode 100644
index b19d649d10..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_postgresqlserver_info.py
+++ /dev/null
@@ -1,263 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_postgresqlserver_info
-version_added: "2.9"
-short_description: Get Azure PostgreSQL Server facts
-description:
- - Get facts of PostgreSQL Server.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- type: str
- name:
- description:
- - The name of the server.
- type: str
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- type: list
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of PostgreSQL Server
- azure_rm_postgresqlserver_info:
- resource_group: myResourceGroup
- name: server_name
-
- - name: List instances of PostgreSQL Server
- azure_rm_postgresqlserver_info:
- resource_group: myResourceGroup
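-
- # Illustrative example of the tags option documented above, which limits results to
- # servers carrying the given tags; the tag values here are placeholders.
- - name: List instances of PostgreSQL Server filtered by tags
- azure_rm_postgresqlserver_info:
- resource_group: myResourceGroup
- tags:
- - key
- - key:value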
-'''
-
-RETURN = '''
-servers:
- description:
- - A list of dictionaries containing facts for PostgreSQL servers.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.DBforPostgreSQL/servers/po
- stgreabdud1223"
- resource_group:
- description:
- - Resource group name.
- returned: always
- type: str
- sample: myResourceGroup
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: postgreabdud1223
- location:
- description:
- - The location the resource resides in.
- returned: always
- type: str
- sample: eastus
- sku:
- description:
- - The SKU of the server.
- returned: always
- type: complex
- contains:
- name:
- description:
- - The name of the SKU.
- returned: always
- type: str
- sample: GP_Gen4_2
- tier:
- description:
- - The tier of the particular SKU.
- returned: always
- type: str
- sample: GeneralPurpose
- capacity:
- description:
- - The scale capacity.
- returned: always
- type: int
- sample: 2
- storage_mb:
- description:
- - The maximum storage allowed for a server.
- returned: always
- type: int
- sample: 128000
- enforce_ssl:
- description:
- - Whether SSL enforcement is enabled.
- returned: always
- type: bool
- sample: False
- admin_username:
- description:
- - The administrator's login name of a server.
- returned: always
- type: str
- sample: serveradmin
- version:
- description:
- - Server version.
- returned: always
- type: str
- sample: "9.6"
- user_visible_state:
- description:
- - The state of the server that is visible to the user.
- returned: always
- type: str
- sample: Ready
- fully_qualified_domain_name:
- description:
- - The fully qualified domain name of a server.
- returned: always
- type: str
- sample: postgreabdud1223.postgres.database.azure.com
- tags:
- description:
- - Tags assigned to the resource. Dictionary of string:string pairs.
- type: dict
- sample: { tag1: abc }
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.rdbms.postgresql import PostgreSQLManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMPostgreSqlServersInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.resource_group = None
- self.name = None
- self.tags = None
- super(AzureRMPostgreSqlServersInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_postgresqlserver_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_postgresqlserver_facts' module has been renamed to 'azure_rm_postgresqlserver_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if (self.resource_group is not None and
- self.name is not None):
- self.results['servers'] = self.get()
- elif (self.resource_group is not None):
- self.results['servers'] = self.list_by_resource_group()
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.postgresql_client.servers.get(resource_group_name=self.resource_group,
- server_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for PostgreSQL Server.')
-
- if response and self.has_tags(response.tags, self.tags):
- results.append(self.format_item(response))
-
- return results
-
- def list_by_resource_group(self):
- response = None
- results = []
- try:
- response = self.postgresql_client.servers.list_by_resource_group(resource_group_name=self.resource_group)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for PostgreSQL Servers.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_item(item))
-
- return results
-
- def format_item(self, item):
- d = item.as_dict()
- d = {
- 'id': d['id'],
- 'resource_group': self.resource_group,
- 'name': d['name'],
- 'sku': d['sku'],
- 'location': d['location'],
- 'storage_mb': d['storage_profile']['storage_mb'],
- 'version': d['version'],
- 'enforce_ssl': (d['ssl_enforcement'] == 'Enabled'),
- 'admin_username': d['administrator_login'],
- 'user_visible_state': d['user_visible_state'],
- 'fully_qualified_domain_name': d['fully_qualified_domain_name'],
- 'tags': d.get('tags')
- }
-
- return d
-
-
-def main():
- AzureRMPostgreSqlServersInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_publicipaddress.py b/lib/ansible/modules/cloud/azure/azure_rm_publicipaddress.py
deleted file mode 100644
index 1a3435a207..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_publicipaddress.py
+++ /dev/null
@@ -1,426 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_publicipaddress
-
-version_added: "2.1"
-
-short_description: Manage Azure Public IP Addresses
-
-description:
- - Create, update and delete a Public IP address.
- - Allows setting and updating the address allocation method and domain name label.
- - Use the M(azure_rm_networkinterface) module to associate a Public IP with a network interface.
-
-options:
- resource_group:
- description:
- - Name of resource group with which the Public IP is associated.
- required: true
- allocation_method:
- description:
- - Control whether the assigned Public IP remains permanently assigned to the object.
- - If not set to C(Static), the IP address may change any time an associated virtual machine is power cycled.
- choices:
- - dynamic
- - static
- - Static
- - Dynamic
- default: dynamic
- domain_name:
- description:
- - The customizable portion of the FQDN assigned to the public IP address. This is an explicit setting.
- - If no value is provided, any existing value will be removed on an existing public IP.
- aliases:
- - domain_name_label
- name:
- description:
- - Name of the Public IP.
- required: true
- state:
- description:
- - Assert the state of the Public IP. Use C(present) to create or update a Public IP and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
- location:
- description:
- - Valid Azure location. Defaults to location of the resource group.
- sku:
- description:
- - The public IP address SKU.
- choices:
- - basic
- - standard
- - Basic
- - Standard
- version_added: "2.6"
- ip_tags:
- description:
- - List of IpTag objects associated with the public IP address.
- - Each element should contain a type:value pair.
- suboptions:
- type:
- description:
- - Sets the ip_tags type.
- value:
- description:
- - Sets the ip_tags value.
- version_added: "2.8"
- idle_timeout:
- description:
- - Idle timeout in minutes.
- type: int
- version_added: "2.8"
- version:
- description:
- - The public IP address version.
- choices:
- - ipv4
- - ipv6
- default: ipv4
- version_added: "2.8"
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Chris Houseknecht (@chouseknecht)
- - Matt Davis (@nitzmahone)
-'''
-
-EXAMPLES = '''
- - name: Create a public ip address
- azure_rm_publicipaddress:
- resource_group: myResourceGroup
- name: my_public_ip
- allocation_method: static
- domain_name: foobar
-
- - name: Delete public ip
- azure_rm_publicipaddress:
- resource_group: myResourceGroup
- name: my_public_ip
- state: absent
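-
- # Illustrative example combining the documented sku, allocation_method, ip_tags and
- # idle_timeout options; the tag and timeout values below are placeholders.
- - name: Create a Standard SKU public ip address with IP tags and idle timeout
- azure_rm_publicipaddress:
- resource_group: myResourceGroup
- name: my_public_ip
- sku: standard
- allocation_method: static
- idle_timeout: 10
- ip_tags:
- - type: FirstPartyUsage
- value: Storage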
-'''
-
-RETURN = '''
-state:
- description:
- - Facts about the current state of the object.
- returned: always
- type: complex
- contains:
- dns_settings:
- description:
- - The FQDN of the DNS record associated with the public IP address.
- returned: always
- type: dict
- sample: {
- "domain_name_label": "ansible-b57dc95985712e45eb8b9c2e",
- "fqdn": "ansible-b57dc95985712e45eb8b9c2e.eastus.cloudapp.azure.com",
- "reverse_fqdn": null
- }
- etag:
- description:
- - A unique read-only string that changes whenever the resource is updated.
- returned: always
- type: str
- sample: "W/'1905ee13-7623-45b1-bc6b-4a12b2fb9d15'"
- idle_timeout_in_minutes:
- description:
- - The idle timeout of the public IP address.
- returned: always
- type: int
- sample: 4
- ip_address:
- description:
- - The assigned public IP address.
- returned: always
- type: str
- sample: 52.160.103.93
- location:
- description:
- - Resource location.
- returned: always
- type: str
- sample: eastus
- name:
- description:
- - Name of the Public IP Address.
- returned: always
- type: str
- sample: publicip002
- provisioning_state:
- description:
- - The provisioning state of the Public IP resource.
- returned: always
- type: str
- sample: Succeeded
- public_ip_allocation_method:
- description:
- - The public IP allocation method.
- returned: always
- type: str
- sample: static
- public_ip_address_version:
- description:
- - The public IP address version.
- returned: always
- type: str
- sample: ipv4
- sku:
- description:
- - The public IP address SKU.
- returned: always
- type: str
- sample: Basic
- tags:
- description:
- - The resource tags.
- returned: always
- type: dict
- sample: {
- "delete": "on-exit",
- "testing": "testing"
- }
- type:
- description:
- - Type of the resource.
- returned: always
- type: str
- sample: "Microsoft.Network/publicIPAddresses"
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils._text import to_native
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def pip_to_dict(pip):
- result = dict(
- name=pip.name,
- type=pip.type,
- location=pip.location,
- tags=pip.tags,
- public_ip_allocation_method=pip.public_ip_allocation_method.lower(),
- public_ip_address_version=pip.public_ip_address_version.lower(),
- dns_settings=dict(),
- ip_address=pip.ip_address,
- idle_timeout_in_minutes=pip.idle_timeout_in_minutes,
- provisioning_state=pip.provisioning_state,
- etag=pip.etag,
- sku=pip.sku.name
- )
- if pip.dns_settings:
- result['dns_settings']['domain_name_label'] = pip.dns_settings.domain_name_label
- result['dns_settings']['fqdn'] = pip.dns_settings.fqdn
- result['dns_settings']['reverse_fqdn'] = pip.dns_settings.reverse_fqdn
- if pip.ip_tags:
- result['ip_tags'] = [dict(type=to_native(x.ip_tag_type), value=to_native(x.tag)) for x in pip.ip_tags]
- return result
-
-
-ip_tag_spec = dict(
- type=dict(type='str', required=True),
- value=dict(type='str', required=True)
-)
-
-
-class AzureRMPublicIPAddress(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- location=dict(type='str'),
- version=dict(type='str', default='ipv4', choices=['ipv4', 'ipv6']),
- allocation_method=dict(type='str', default='dynamic', choices=['Dynamic', 'Static', 'dynamic', 'static']),
- domain_name=dict(type='str', aliases=['domain_name_label']),
- sku=dict(type='str', choices=['Basic', 'Standard', 'basic', 'standard']),
- ip_tags=dict(type='list', elements='dict', options=ip_tag_spec),
- idle_timeout=dict(type='int')
- )
-
- self.resource_group = None
- self.name = None
- self.location = None
- self.state = None
- self.tags = None
- self.allocation_method = None
- self.domain_name = None
- self.sku = None
- self.version = None
- self.ip_tags = None
- self.idle_timeout = None
-
- self.results = dict(
- changed=False,
- state=dict()
- )
-
- super(AzureRMPublicIPAddress, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- results = dict()
- changed = False
- pip = None
-
- # capitalize the sku and allocation_method. basic => Basic, Basic => Basic.
- self.allocation_method = self.allocation_method.capitalize() if self.allocation_method else None
- self.sku = self.sku.capitalize() if self.sku else None
- self.version = 'IPv4' if self.version == 'ipv4' else 'IPv6'
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- # Set default location
- self.location = resource_group.location
-
- try:
- self.log("Fetch public ip {0}".format(self.name))
- pip = self.network_client.public_ip_addresses.get(self.resource_group, self.name)
- self.check_provisioning_state(pip, self.state)
- self.log("PIP {0} exists".format(self.name))
- if self.state == 'present':
- results = pip_to_dict(pip)
- domain_label = results['dns_settings'].get('domain_name_label')
- if self.domain_name is not None and ((self.domain_name or domain_label) and self.domain_name != domain_label):
- self.log('CHANGED: domain_name_label')
- changed = True
- results['dns_settings']['domain_name_label'] = self.domain_name
-
- if self.allocation_method.lower() != results['public_ip_allocation_method'].lower():
- self.log("CHANGED: allocation_method")
- changed = True
- results['public_ip_allocation_method'] = self.allocation_method
-
- if self.sku and self.sku != results['sku']:
- self.log("CHANGED: sku")
- changed = True
- results['sku'] = self.sku
-
- if self.version.lower() != results['public_ip_address_version'].lower():
- self.log("CHANGED: version")
- changed = True
- results['public_ip_address_version'] = self.version
-
- if self.idle_timeout and self.idle_timeout != results['idle_timeout_in_minutes']:
- self.log("CHANGED: idle_timeout")
- changed = True
- results['idle_timeout_in_minutes'] = self.idle_timeout
-
- if str(self.ip_tags or []) != str(results.get('ip_tags') or []):
- self.log("CHANGED: ip_tags")
- changed = True
- results['ip_tags'] = self.ip_tags
-
- update_tags, results['tags'] = self.update_tags(results['tags'])
- if update_tags:
- changed = True
-
- elif self.state == 'absent':
- self.log("CHANGED: public ip {0} exists but requested state is 'absent'".format(self.name))
- changed = True
- except CloudError:
- self.log('Public ip {0} does not exist'.format(self.name))
- if self.state == 'present':
- self.log("CHANGED: pip {0} does not exist but requested state is 'present'".format(self.name))
- changed = True
-
- self.results['state'] = results
- self.results['changed'] = changed
-
- if self.check_mode:
- return results
-
- if changed:
- if self.state == 'present':
- if not pip:
- self.log("Create new Public IP {0}".format(self.name))
- pip = self.network_models.PublicIPAddress(
- location=self.location,
- public_ip_address_version=self.version,
- public_ip_allocation_method=self.allocation_method if self.version == 'IPv4' else None,
- sku=self.network_models.PublicIPAddressSku(name=self.sku) if self.sku else None,
- idle_timeout_in_minutes=self.idle_timeout if self.idle_timeout and self.idle_timeout > 0 else None
- )
- if self.ip_tags:
- pip.ip_tags = [self.network_models.IpTag(ip_tag_type=x['type'], tag=x['value']) for x in self.ip_tags]
- if self.tags:
- pip.tags = self.tags
- if self.domain_name:
- pip.dns_settings = self.network_models.PublicIPAddressDnsSettings(
- domain_name_label=self.domain_name
- )
- else:
- self.log("Update Public IP {0}".format(self.name))
- pip = self.network_models.PublicIPAddress(
- location=results['location'],
- public_ip_allocation_method=results['public_ip_allocation_method'],
- tags=results['tags']
- )
- if self.domain_name:
- pip.dns_settings = self.network_models.PublicIPAddressDnsSettings(
- domain_name_label=self.domain_name
- )
- self.results['state'] = self.create_or_update_pip(pip)
- elif self.state == 'absent':
- self.log('Delete public ip {0}'.format(self.name))
- self.delete_pip()
-
- return self.results
-
- def create_or_update_pip(self, pip):
- try:
- poller = self.network_client.public_ip_addresses.create_or_update(self.resource_group, self.name, pip)
- pip = self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error creating or updating {0} - {1}".format(self.name, str(exc)))
- return pip_to_dict(pip)
-
- def delete_pip(self):
- try:
- poller = self.network_client.public_ip_addresses.delete(self.resource_group, self.name)
- self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error deleting {0} - {1}".format(self.name, str(exc)))
- # Delete returns nada. If we get here, assume that all is well.
- self.results['state']['status'] = 'Deleted'
- return True
-
-
-def main():
- AzureRMPublicIPAddress()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_publicipaddress_info.py b/lib/ansible/modules/cloud/azure/azure_rm_publicipaddress_info.py
deleted file mode 100644
index 0be71fed55..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_publicipaddress_info.py
+++ /dev/null
@@ -1,321 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_publicipaddress_info
-
-version_added: "2.9"
-
-short_description: Get public IP facts
-
-description:
- - Get facts for a specific public IP or all public IPs within a resource group.
-
-options:
- name:
- description:
- - Only show results for a specific Public IP.
- resource_group:
- description:
- - Limit results by resource group. Required when using name parameter.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Chris Houseknecht (@chouseknecht)
- - Matt Davis (@nitzmahone)
-'''
-
-EXAMPLES = '''
- - name: Get facts for one Public IP
- azure_rm_publicipaddress_info:
- resource_group: myResourceGroup
- name: publicip001
-
- - name: Get facts for all Public IPs within a resource group
- azure_rm_publicipaddress_info:
- resource_group: myResourceGroup
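-
- # Illustrative example of the tags option documented above, which limits results to
- # public IPs carrying the given tags; the tag values here are placeholders.
- - name: Get facts for Public IPs matching tags
- azure_rm_publicipaddress_info:
- resource_group: myResourceGroup
- tags:
- - testing
- - foo:bar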
-'''
-
-RETURN = '''
-azure_publicipaddresses:
- description:
- - List of public IP address dicts.
- - Please note that this option will be deprecated in 2.10 when the curated format becomes the only supported format.
- returned: always
- type: list
- example: [{
- "etag": 'W/"a31a6d7d-cb18-40a5-b16d-9f4a36c1b18a"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/publicIPAddresses/pip2001",
- "location": "eastus2",
- "name": "pip2001",
- "properties": {
- "idleTimeoutInMinutes": 4,
- "provisioningState": "Succeeded",
- "publicIPAllocationMethod": "Dynamic",
- "resourceGuid": "29de82f4-a7da-440e-bd3d-9cabb79af95a"
- },
- "type": "Microsoft.Network/publicIPAddresses"
- }]
-publicipaddresses:
- description:
- - List of public IP address facts.
- - Contains the details that match the azure_rm_publicipaddress module parameters.
- - Returned when the format parameter is set to curated.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxx---xxxxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/publicIPAddresses/pipb57dc95224
- name:
- description:
- - Name of the public IP address.
- returned: always
- type: str
- sample: pipb57dc95224
- type:
- description:
- - Resource type.
- returned: always
- type: str
- sample: "Microsoft.Network/publicIPAddresses"
- location:
- description:
- - Resource location.
- returned: always
- type: str
- sample: eastus
- tags:
- description:
- - Resource tags.
- returned: always
- type: dict
- sample: {
- "delete": "on-exit",
- "testing": "testing"
- }
- allocation_method:
- description:
- - The public IP allocation method.
- - Possible values are C(static) and C(dynamic).
- returned: always
- type: str
- sample: static
- version:
- description:
- - The public IP address version.
- - Possible values are C(ipv4) and C(ipv6).
- returned: always
- type: str
- sample: ipv4
- dns_settings:
- description:
- - The FQDN of the DNS record associated with the public IP address.
- returned: always
- type: dict
- sample: {
- "domain_name_label": "ansible-b57dc95985712e45eb8b9c2e",
- "fqdn": "ansible-b57dc95985712e45eb8b9c2e.eastus.cloudapp.azure.com",
- "reverse_fqdn": null
- }
- ip_tags:
- description:
- - The list of tags associated with the public IP address.
- returned: always
- type: list
- sample: [
- {
- "type": "FirstPartyUsage",
- "value": "Storage"
- }
- ]
- ip_address:
- description:
- - The assigned public IP address.
- returned: always
- type: str
- sample: 40.121.144.14
- idle_timeout:
- description:
- - The idle timeout of the public IP address.
- returned: always
- type: int
- sample: 4
- provisioning_state:
- description:
- - The provisioning state of the PublicIP resource.
- - Possible values include C(Succeeded).
- returned: always
- type: str
- sample: Succeeded
- etag:
- description:
- - A unique read-only string that changes whenever the resource is updated.
- returned: always
- type: str
- sample: "W/'1905ee13-7623-45b1-bc6b-4a12b2fb9d15'"
- sku:
- description:
- - The public IP address SKU.
- returned: always
- type: str
- sample: Basic
-'''
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.common import AzureMissingResourceHttpError, AzureHttpError
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-AZURE_OBJECT_CLASS = 'PublicIp'
-
-
-class AzureRMPublicIPInfo(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str'),
- resource_group=dict(type='str'),
- tags=dict(type='list')
- )
-
- self.results = dict(
- changed=False,
- )
-
- self.name = None
- self.resource_group = None
- self.tags = None
-
- super(AzureRMPublicIPInfo, self).__init__(self.module_arg_spec,
- supports_tags=False,
- facts_module=True)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_publicipaddress_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_publicipaddress_facts' module has been renamed to 'azure_rm_publicipaddress_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- result = []
- if self.name and not self.resource_group:
- self.fail("Parameter error: resource group required when filtering by name.")
-
- if self.name:
- result = self.get_item()
- elif self.resource_group:
- result = self.list_resource_group()
- else:
- result = self.list_all()
-
- raw = self.filter(result)
-
- if is_old_facts:
- self.results['ansible_facts'] = {
- 'azure_publicipaddresses': self.serialize(raw),
- }
- self.results['publicipaddresses'] = self.format(raw)
-
- return self.results
-
- def format(self, raw):
- return [self.pip_to_dict(item) for item in raw]
-
- def serialize(self, raw):
- results = []
- for item in raw:
- pip = self.serialize_obj(item, AZURE_OBJECT_CLASS)
- pip['name'] = item.name
- pip['type'] = item.type
- results.append(pip)
- return results
-
- def filter(self, response):
- return [item for item in response if self.has_tags(item.tags, self.tags)]
-
- # duplicate with azure_rm_publicipaddress
- def pip_to_dict(self, pip):
- result = dict(
- id=pip.id,
- name=pip.name,
- type=pip.type,
- location=pip.location,
- tags=pip.tags,
- allocation_method=pip.public_ip_allocation_method.lower(),
- version=pip.public_ip_address_version.lower(),
- dns_settings=dict(),
- ip_tags=list(),
- ip_address=pip.ip_address,
- idle_timeout=pip.idle_timeout_in_minutes,
- provisioning_state=pip.provisioning_state,
- etag=pip.etag,
- sku=pip.sku.name
- )
- if pip.dns_settings:
- result['dns_settings']['domain_name_label'] = pip.dns_settings.domain_name_label
- result['dns_settings']['fqdn'] = pip.dns_settings.fqdn
- result['dns_settings']['reverse_fqdn'] = pip.dns_settings.reverse_fqdn
- if pip.ip_tags:
- result['ip_tags'] = [dict(type=x.ip_tag_type, value=x.tag) for x in pip.ip_tags]
- return result
-
- def get_item(self):
- self.log('Get properties for {0}'.format(self.name))
- item = None
- try:
- item = self.network_client.public_ip_addresses.get(self.resource_group, self.name)
- except CloudError:
- pass
- return [item] if item else []
-
- def list_resource_group(self):
- self.log('List items in resource group')
- try:
- response = self.network_client.public_ip_addresses.list(self.resource_group)
- except AzureHttpError as exc:
- self.fail("Error listing items in resource group {0} - {1}".format(self.resource_group, str(exc)))
- return response
-
- def list_all(self):
- self.log('List all items')
- try:
- response = self.network_client.public_ip_addresses.list_all()
- except AzureHttpError as exc:
- self.fail("Error listing all items - {0}".format(str(exc)))
- return response
-
-
-def main():
- AzureRMPublicIPInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_rediscache.py b/lib/ansible/modules/cloud/azure/azure_rm_rediscache.py
deleted file mode 100644
index b6a095a25d..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_rediscache.py
+++ /dev/null
@@ -1,779 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_rediscache
-version_added: "2.8"
-short_description: Manage Azure Cache for Redis instance
-description:
- - Create, update and delete instance of Azure Cache for Redis.
-
-options:
- resource_group:
- description:
- - Name of the resource group to which the resource belongs.
- required: True
- name:
- description:
- - Unique name of the Azure Cache for Redis to create or update.
- required: True
- location:
- description:
- - Resource location. If not set, location from the resource group will be used as default.
- sku:
- description:
- - SKU info of Azure Cache for Redis.
- suboptions:
- name:
- description:
- - Type of Azure Cache for Redis to deploy.
- choices:
- - basic
- - standard
- - premium
- required: True
- size:
- description:
- - Size of Azure Cache for Redis to deploy.
- - When I(sku=basic) or I(sku=standard), allowed values are C(C0), C(C1), C(C2), C(C3), C(C4), C(C5), C(C6).
- - When I(sku=premium), allowed values are C(P1), C(P2), C(P3), C(P4).
- - Please see U(https://docs.microsoft.com/en-us/rest/api/redis/redis/create#sku) for allowed values.
- choices:
- - C0
- - C1
- - C2
- - C3
- - C4
- - C5
- - C6
- - P1
- - P2
- - P3
- - P4
- required: True
- enable_non_ssl_port:
- description:
- - When set I(enable_non_ssl_port=true), the non-ssl Redis server port 6379 will be enabled.
- type: bool
- default: false
- maxfragmentationmemory_reserved:
- description:
- - Configures the amount of memory in MB that is reserved to accommodate for memory fragmentation.
- - Please see U(https://docs.microsoft.com/en-us/azure/redis-cache/cache-configure#advanced-settings) for more detail.
- maxmemory_reserved:
- description:
- - Configures the amount of memory in MB that is reserved for non-cache operations.
- - Please see U(https://docs.microsoft.com/en-us/azure/redis-cache/cache-configure#advanced-settings) for more detail.
- maxmemory_policy:
- description:
- - Configures the eviction policy of the cache.
- - Please see U(https://docs.microsoft.com/en-us/azure/redis-cache/cache-configure#advanced-settings) for more detail.
- choices:
- - volatile_lru
- - allkeys_lru
- - volatile_random
- - allkeys_random
- - volatile_ttl
- - noeviction
- notify_keyspace_events:
- description:
- - Allows clients to receive notifications when certain events occur.
- - Please see U(https://docs.microsoft.com/en-us/azure/redis-cache/cache-configure#advanced-settings) for more detail.
- type: str
- shard_count:
- description:
- - The number of shards to be created when I(sku=premium).
- type: int
- static_ip:
- description:
- - Static IP address. Required when deploying an Azure Cache for Redis inside an existing Azure virtual network.
- subnet:
- description:
- - Subnet in a virtual network to deploy the Azure Cache for Redis in.
- - It can be the resource ID of a subnet, for example
- /subscriptions/{subid}/resourceGroups/{resourceGroupName}/Microsoft.{Network|ClassicNetwork}/VirtualNetworks/vnet1/subnets/subnet1.
- - It can be a dictionary containing I(name), I(virtual_network_name) and I(resource_group).
- - I(name). Name of the subnet.
- - I(resource_group). Resource group name of the subnet.
- - I(virtual_network_name). Name of virtual network to which this subnet belongs.
- tenant_settings:
- description:
- - Dict of tenant settings.
- type: dict
- reboot:
- description:
- - Reboot the specified Redis node(s). There is a potential for data loss.
- suboptions:
- shard_id:
- description:
- - If clustering is enabled, the id of the shard to be rebooted.
- type: int
- reboot_type:
- description:
- - Which Redis node(s) to reboot.
- choices:
- - primary
- - secondary
- - all
- default: all
- regenerate_key:
- description:
- - Regenerate Redis cache's access keys.
- suboptions:
- key_type:
- description:
- - The Redis key to regenerate.
- choices:
- - primary
- - secondary
- wait_for_provisioning:
- description:
- - Wait till the Azure Cache for Redis instance provisioning_state is Succeeded.
- - It takes several minutes for the Azure Cache for Redis to be provisioned and ready for use after creating/updating/rebooting.
- - Set this option to C(true) to wait for provisioning_state. Set to C(false) if you don't care about provisioning_state.
- - Poll wait timeout is 60 minutes.
- type: bool
- default: True
- state:
- description:
- - Assert the state of the Azure Cache for Redis.
- - Use C(present) to create or update an Azure Cache for Redis and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yunge Zhu(@yungezz)
-
-'''
-
-EXAMPLES = '''
- - name: Create an Azure Cache for Redis
- azure_rm_rediscache:
- resource_group: myResourceGroup
- name: myRedis
- sku:
- name: basic
- size: C1
-
- - name: Scale up the Azure Cache for Redis
- azure_rm_rediscache:
- resource_group: myResourceGroup
- name: myRedis
- sku:
- name: standard
- size: C1
- tags:
- testing: foo
-
- - name: Force reboot the redis cache
- azure_rm_rediscache:
- resource_group: myResourceGroup
- name: myRedisCache
- reboot:
- reboot_type: all
-
- - name: Create Azure Cache for Redis with subnet
- azure_rm_rediscache:
- resource_group: myResourceGroup
- name: myRedis
- sku:
- name: premium
- size: P1
- subnet: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVirt
- ualNetwork/subnets/mySubnet"
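-
- # Illustrative examples of the documented dictionary form of the subnet option and of the
- # regenerate_key option; the virtual network, subnet and cache names here are placeholders.
- - name: Create Azure Cache for Redis with subnet given as a dictionary
- azure_rm_rediscache:
- resource_group: myResourceGroup
- name: myRedis
- sku:
- name: premium
- size: P1
- subnet:
- name: mySubnet
- virtual_network_name: myVirtualNetwork
- resource_group: myResourceGroup
-
- - name: Regenerate the primary access key of the Azure Cache for Redis
- azure_rm_rediscache:
- resource_group: myResourceGroup
- name: myRedis
- regenerate_key:
- key_type: primary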
-
-'''
-
-RETURN = '''
-id:
- description:
- - Id of the Azure Cache for Redis.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Cache/Redis/myRedis"
-host_name:
- description:
- - Host name of the Azure Cache for Redis.
- returned: when I(state=present)
- type: str
- sample: "myredis.redis.cache.windows.net"
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.azure_operation import AzureOperationPoller
- from msrest.polling import LROPoller
- from msrest.serialization import Model
- from azure.mgmt.redis import RedisManagementClient
- from azure.mgmt.redis.models import (RedisCreateParameters, RedisUpdateParameters, Sku)
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-sku_spec = dict(
- name=dict(
- type='str',
- choices=['basic', 'standard', 'premium']),
- size=dict(
- type='str',
- choices=['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'P1', 'P2', 'P3', 'P4']
- )
-)
-
-
-reboot_spec = dict(
- shard_id=dict(
- type='str'
- ),
- reboot_type=dict(
- type='str',
- choices=['primary', 'secondary', 'all']
- )
-)
-
-
-regenerate_key_spec = dict(
- key_type=dict(
- type='str',
- choices=['primary', 'secondary']
- )
-)
-
-
-def rediscache_to_dict(redis):
- result = dict(
- id=redis.id,
- name=redis.name,
- location=redis.location,
- sku=dict(
- name=redis.sku.name.lower(),
- size=redis.sku.family + str(redis.sku.capacity)
- ),
- enable_non_ssl_port=redis.enable_non_ssl_port,
- host_name=redis.host_name,
- shard_count=redis.shard_count,
- subnet=redis.subnet_id,
- static_ip=redis.static_ip,
- provisioning_state=redis.provisioning_state,
- tenant_settings=redis.tenant_settings,
- tags=redis.tags if redis.tags else None
- )
- for key in redis.redis_configuration:
- result[hyphen_to_underline(key)] = hyphen_to_underline(redis.redis_configuration.get(key, None))
- return result
-
-
-def hyphen_to_underline(input):
- if input and isinstance(input, str):
- return input.replace("-", "_")
- return input
-
-
-def underline_to_hyphen(input):
- if input and isinstance(input, str):
- return input.replace("_", "-")
- return input
-
-
-def get_reboot_type(type):
- if type == "primary":
- return "PrimaryNode"
- if type == "secondary":
- return "SecondaryNode"
- if type == "all":
- return "AllNodes"
- return type
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMRedisCaches(AzureRMModuleBase):
- """Configuration class for an Azure RM Cache for Redis resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- sku=dict(
- type='dict',
- options=sku_spec
- ),
- enable_non_ssl_port=dict(
- type='bool',
- default=False
- ),
- maxfragmentationmemory_reserved=dict(
- type='int'
- ),
- maxmemory_reserved=dict(
- type='int'
- ),
- maxmemory_policy=dict(
- type='str',
- choices=[
- "volatile_lru",
- "allkeys_lru",
- "volatile_random",
- "allkeys_random",
- "volatile_ttl",
- "noeviction"
- ]
- ),
- notify_keyspace_events=dict(
- type='str'
- ),
- shard_count=dict(
- type='int'
- ),
- static_ip=dict(
- type='str'
- ),
- subnet=dict(
- type='raw'
- ),
- tenant_settings=dict(
- type='dict'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- ),
- reboot=dict(
- type='dict',
- options=reboot_spec
- ),
- regenerate_key=dict(
- type='dict',
- options=regenerate_key_spec
- ),
- wait_for_provisioning=dict(
- type='bool',
- default=True
- )
- )
-
- self._client = None
-
- self.resource_group = None
- self.name = None
- self.location = None
-
- self.sku = None
- self.size = None
- self.enable_non_ssl_port = False
- self.configuration_file_path = None
- self.shard_count = None
- self.static_ip = None
- self.subnet = None
- self.tenant_settings = None
- self.reboot = None
- self.regenerate_key = None
-
- self.wait_for_provisioning = None
- self.wait_for_provisioning_polling_interval_in_seconds = 30
- self.wait_for_provisioning_polling_times = 120
-
- self.tags = None
-
- self.results = dict(
- changed=False,
- id=None,
- host_name=None
- )
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMRedisCaches, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- old_response = None
- response = None
- to_be_updated = False
-
- # define redis_configuration properties
- self.redis_configuration_properties = ["maxfragmentationmemory_reserved",
- "maxmemory_reserved",
- "maxmemory_policy",
- "notify_keyspace_events"]
-
- # get management client
- self._client = self.get_mgmt_svc_client(RedisManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2018-03-01')
-
- # set location
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- # check subnet exists
- if self.subnet:
- self.subnet = self.parse_subnet()
-
- # get existing Azure Cache for Redis
- old_response = self.get_rediscache()
-
- if old_response:
- self.results['id'] = old_response['id']
-
- if self.state == 'present':
- # if redis not exists
- if not old_response:
- self.log("Azure Cache for Redis instance doesn't exist")
-
- to_be_updated = True
- self.to_do = Actions.Create
-
- if not self.sku:
- self.fail("Please specify sku when creating a new Azure Cache for Redis.")
-
- else:
- # redis exists already, do update
- self.log("Azure Cache for Redis instance already exists")
-
- update_tags, self.tags = self.update_tags(old_response.get('tags', None))
-
- if update_tags:
- to_be_updated = True
- self.to_do = Actions.Update
-
- # check if update
- if self.check_update(old_response):
- to_be_updated = True
- self.to_do = Actions.Update
-
- elif self.state == 'absent':
- if old_response:
- self.log("Delete Azure Cache for Redis instance")
- self.results['id'] = old_response['id']
- to_be_updated = True
- self.to_do = Actions.Delete
- else:
- self.results['changed'] = False
- self.log("Azure Cache for Redis {0} does not exist.".format(self.name))
-
- if to_be_updated:
- self.log('Need to Create/Update Azure Cache for Redis')
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- if self.to_do == Actions.Create:
- response = self.create_rediscache()
- self.results['id'] = response['id']
- self.results['host_name'] = response['host_name']
-
- if self.to_do == Actions.Update:
- response = self.update_rediscache()
- self.results['id'] = response['id']
- self.results['host_name'] = response['host_name']
-
- if self.to_do == Actions.Delete:
- self.delete_rediscache()
- self.log('Azure Cache for Redis instance deleted')
-
- if self.reboot:
- self.reboot['reboot_type'] = get_reboot_type(self.reboot['reboot_type'])
- self.force_reboot_rediscache()
-
- if self.regenerate_key:
- response = self.regenerate_rediscache_key()
- self.results['keys'] = response
-
- return self.results
-
- def check_update(self, existing):
- if self.enable_non_ssl_port is not None and existing['enable_non_ssl_port'] != self.enable_non_ssl_port:
- self.log("enable_non_ssl_port diff: origin {0} / update {1}".format(existing['enable_non_ssl_port'], self.enable_non_ssl_port))
- return True
- if self.sku is not None:
- if existing['sku']['name'] != self.sku['name']:
- self.log("sku diff: origin {0} / update {1}".format(existing['sku']['name'], self.sku['name']))
- return True
- if existing['sku']['size'] != self.sku['size']:
- self.log("size diff: origin {0} / update {1}".format(existing['sku']['size'], self.sku['size']))
- return True
- if self.tenant_settings is not None and existing['tenant_settings'] != self.tenant_settings:
- self.log("tenant_settings diff: origin {0} / update {1}".format(existing['tenant_settings'], self.tenant_settings))
- return True
- if self.shard_count is not None and existing['shard_count'] != self.shard_count:
- self.log("shard_count diff: origin {0} / update {1}".format(existing['shard_count'], self.shard_count))
- return True
- if self.subnet is not None and existing['subnet'] != self.subnet:
- self.log("subnet diff: origin {0} / update {1}".format(existing['subnet'], self.subnet))
- return True
- if self.static_ip is not None and existing['static_ip'] != self.static_ip:
- self.log("static_ip diff: origin {0} / update {1}".format(existing['static_ip'], self.static_ip))
- return True
- for config in self.redis_configuration_properties:
- if getattr(self, config) is not None and existing.get(config, None) != getattr(self, config, None):
- self.log("redis_configuration {0} diff: origin {1} / update {2}".format(config, existing.get(config, None), getattr(self, config, None)))
- return True
- return False
-
- def create_rediscache(self):
- '''
- Creates Azure Cache for Redis instance with the specified configuration.
-
- :return: deserialized Azure Cache for Redis instance state dictionary
- '''
- self.log(
- "Creating Azure Cache for Redis instance {0}".format(self.name))
-
- try:
- redis_config = dict()
- for key in self.redis_configuration_properties:
- if getattr(self, key, None):
- redis_config[underline_to_hyphen(key)] = underline_to_hyphen(getattr(self, key))
-
- params = RedisCreateParameters(
- location=self.location,
- sku=Sku(self.sku['name'].title(), self.sku['size'][0], self.sku['size'][1:]),
- tags=self.tags,
- redis_configuration=redis_config,
- enable_non_ssl_port=self.enable_non_ssl_port,
- tenant_settings=self.tenant_settings,
- shard_count=self.shard_count,
- subnet_id=self.subnet,
- static_ip=self.static_ip
- )
-
- response = self._client.redis.create(resource_group_name=self.resource_group,
- name=self.name,
- parameters=params)
- if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- if self.wait_for_provisioning:
- self.wait_for_redis_running()
-
- except CloudError as exc:
- self.log('Error attempting to create the Azure Cache for Redis instance.')
- self.fail(
- "Error creating the Azure Cache for Redis instance: {0}".format(str(exc)))
- return rediscache_to_dict(response)
-
- def update_rediscache(self):
- '''
- Updates Azure Cache for Redis instance with the specified configuration.
-
- :return: Azure Cache for Redis instance state dictionary
- '''
- self.log(
- "Updating Azure Cache for Redis instance {0}".format(self.name))
-
- try:
- redis_config = dict()
- for key in self.redis_configuration_properties:
- if getattr(self, key, None):
- redis_config[underline_to_hyphen(key)] = underline_to_hyphen(getattr(self, key))
-
- params = RedisUpdateParameters(
- redis_configuration=redis_config,
- enable_non_ssl_port=self.enable_non_ssl_port,
- tenant_settings=self.tenant_settings,
- shard_count=self.shard_count,
- sku=Sku(self.sku['name'].title(), self.sku['size'][0], self.sku['size'][1:]),
- tags=self.tags
- )
-
- response = self._client.redis.update(resource_group_name=self.resource_group,
- name=self.name,
- parameters=params)
- if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- if self.wait_for_provisioning:
- self.wait_for_redis_running()
-
- except CloudError as exc:
- self.log('Error attempting to update the Azure Cache for Redis instance.')
- self.fail(
- "Error updating the Azure Cache for Redis instance: {0}".format(str(exc)))
- return rediscache_to_dict(response)
-
- def delete_rediscache(self):
- '''
- Deletes specified Azure Cache for Redis instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the Azure Cache for Redis instance {0}".format(self.name))
- try:
- response = self._client.redis.delete(resource_group_name=self.resource_group,
- name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the Azure Cache for Redis instance.')
- self.fail(
- "Error deleting the Azure Cache for Redis instance: {0}".format(str(e)))
- return True
-
- def get_rediscache(self):
- '''
- Gets the properties of the specified Azure Cache for Redis instance.
-
- :return: Azure Cache for Redis instance state dictionary
- '''
- self.log("Checking if the Azure Cache for Redis instance {0} is present".format(self.name))
-
- response = None
-
- try:
- response = self._client.redis.get(resource_group_name=self.resource_group,
- name=self.name)
-
- self.log("Response : {0}".format(response))
- self.log("Azure Cache for Redis instance : {0} found".format(response.name))
- return rediscache_to_dict(response)
-
- except CloudError as ex:
- self.log("Didn't find Azure Cache for Redis {0} in resource group {1}".format(
- self.name, self.resource_group))
-
- return False
-
- def force_reboot_rediscache(self):
- '''
- Force reboot specified redis cache instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Force reboot the redis cache instance {0}".format(self.name))
- try:
- response = self._client.redis.force_reboot(resource_group_name=self.resource_group,
- name=self.name,
- reboot_type=self.reboot['reboot_type'],
- shard_id=self.reboot.get('shard_id'))
- if isinstance(response, AzureOperationPoller) or isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- if self.wait_for_provisioning:
- self.wait_for_redis_running()
- except CloudError as e:
- self.log('Error attempting to force reboot the redis cache instance.')
- self.fail(
- "Error force rebooting the redis cache instance: {0}".format(str(e)))
- return True
-
- def regenerate_rediscache_key(self):
- '''
- Regenerate key of redis cache instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Regenerate key of redis cache instance {0}".format(self.name))
- try:
- response = self._client.redis.regenerate_key(resource_group_name=self.resource_group,
- name=self.name,
- key_type=self.regenerate_key['key_type'].title())
- return response.to_dict()
- except CloudError as e:
- self.log('Error attempting to regenerate key of redis cache instance.')
- self.fail(
- "Error regenerating key of redis cache instance: {0}".format(str(e)))
-
- def get_subnet(self):
- '''
- Gets the properties of the specified subnet.
-
- :return: subnet id
- '''
- self.log("Checking if the subnet {0} is present".format(self.name))
-
- response = None
-
- try:
- response = self.network_client.subnets.get(self.subnet['resource_group'],
- self.subnet['virtual_network_name'],
- self.subnet['name'])
-
- self.log("Subnet found : {0}".format(response))
- return response.id
-
- except CloudError as ex:
- self.log("Didn't find subnet {0} in resource group {1}".format(
- self.subnet['name'], self.subnet['resource_group']))
-
- return False
-
- def parse_subnet(self):
- if isinstance(self.subnet, dict):
- if 'virtual_network_name' not in self.subnet or \
- 'name' not in self.subnet:
- self.fail("Subnet dict must contain virtual_network_name and name")
- if 'resource_group' not in self.subnet:
- self.subnet['resource_group'] = self.resource_group
- subnet_id = self.get_subnet()
- else:
- subnet_id = self.subnet
- return subnet_id
-
- def wait_for_redis_running(self):
- try:
- response = self._client.redis.get(resource_group_name=self.resource_group, name=self.name)
- status = response.provisioning_state
- polling_times = 0
-
- while polling_times < self.wait_for_provisioning_polling_times:
- if status.lower() != "succeeded":
- polling_times += 1
- time.sleep(self.wait_for_provisioning_polling_interval_in_seconds)
- response = self._client.redis.get(resource_group_name=self.resource_group, name=self.name)
- status = response.provisioning_state
- else:
- return True
- self.fail("Azure Cache for Redis is not running after 60 mins.")
- except CloudError as e:
- self.fail("Failed to get Azure Cache for Redis: {0}".format(str(e)))
-
-
-def main():
- """Main execution"""
- AzureRMRedisCaches()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_rediscache_info.py b/lib/ansible/modules/cloud/azure/azure_rm_rediscache_info.py
deleted file mode 100644
index d238e48e36..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_rediscache_info.py
+++ /dev/null
@@ -1,354 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yunge Zhu <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_rediscache_info
-
-version_added: "2.9"
-
-short_description: Get Azure Cache for Redis instance facts
-
-description:
- - Get facts for Azure Cache for Redis instance.
-
-options:
- resource_group:
- description:
- - The resource group to search for the desired Azure Cache for Redis.
- required: True
- name:
- description:
- - Limit results to a specific Azure Cache for Redis.
- return_access_keys:
- description:
- - Indicate whether to return access keys of the Azure Cache for Redis.
- default: False
- type: bool
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yunge Zhu (@yungezz)
-'''
-
-EXAMPLES = '''
- - name: Get Azure Cache for Redis by name
- azure_rm_rediscache_info:
- resource_group: myResourceGroup
- name: myRedis
-
- - name: Get Azure Cache for Redis with access keys by name
- azure_rm_rediscache_info:
- resource_group: myResourceGroup
- name: myRedis
- return_access_keys: true
-
- - name: Get Azure Cache for Redis in specific resource group
- azure_rm_rediscache_info:
- resource_group: myResourceGroup
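-
- # Illustrative example of the tags option documented above, which limits results to
- # caches carrying the given tags; the tag values here are placeholders.
- - name: Get Azure Cache for Redis instances matching tags
- azure_rm_rediscache_info:
- resource_group: myResourceGroup
- tags:
- - key
- - key:value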
-'''
-
-RETURN = '''
-rediscaches:
- description:
- - List of Azure Cache for Redis instances.
- returned: always
- type: complex
- contains:
- resource_group:
- description:
- - Name of the resource group to which the Azure Cache for Redis belongs.
- returned: always
- type: str
- sample: myResourceGroup
- name:
- description:
- - Name of the Azure Cache for Redis.
- returned: always
- type: str
- sample: myRedis
- id:
- description:
- - Id of the Azure Cache for Redis.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Cache/Redis/myRedis
- provisioning_state:
- description:
- - Provisioning state of the Redis cache.
- returned: always
- type: str
- sample: Creating
- location:
- description:
- - Location of the Azure Cache for Redis.
- returned: always
- type: str
- sample: WestUS
- enable_non_ssl_port:
- description:
- - Specifies whether the non-ssl Redis server port (6379) is enabled.
- returned: always
- type: bool
- sample: false
- sku:
- description:
- - Dict of SKU information.
- returned: always
- type: dict
- contains:
- name:
- description:
- - Name of the SKU.
- returned: always
- type: str
- sample: standard
- size:
- description:
- - Size of the Azure Cache for Redis.
- returned: always
- type: str
- sample: C1
- static_ip:
- description:
- - Static IP address.
- returned: always
- type: str
- sample: 10.75.0.11
- subnet:
- description:
- - The full resource ID of a subnet in a virtual network to deploy the Azure Cache for Redis in.
- returned: always
- type: str
- sample:
- - "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/Microsoft.Network/VirtualNetworks/myVirtualNetwork/subnets/mySubnet"
- configuration:
- description:
- - Dict of Redis configuration.
- returned: always
- type: dict
- sample: maxmemory_reserved
- host_name:
- description:
- - Redis host name.
- returned: always
- type: str
- sample: testRedis.redis.cache.windows.net
- shard_count:
- description:
- - The number of shards on a Premium Cluster Cache.
- returned: always
- type: int
- sample: 1
- tenant_settings:
- description:
- - Dict of tenant settings.
- returned: always
- type: dict
- sample: { "key1": "value1" }
- tags:
- description:
- - Resource tags.
- returned: always
- type: dict
- sample: { "key1": "value1" }
- access_keys:
- description:
- - Azure Cache for Redis access keys.
- type: dict
- returned: when I(return_access_keys=true)
- contains:
- primary:
- description:
- - The current primary key that clients can use to authenticate the Redis cache.
- returned: always
- type: str
- sample: X2xXXxx7xxxxxx5xxxx0xxxxx75xxxxxxxxXXXxxxxx=
- secondary:
- description:
- - The current secondary key that clients can use to authenticate the Redis cache.
- returned: always
- type: str
- sample: X2xXXxx7xxxxxx5xxxx0xxxxx75xxxxxxxxXXXxxxxx=
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from azure.common import AzureHttpError
- from azure.mgmt.redis import RedisManagementClient
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # handled in azure_rm_common
- pass
-
-import re
-
-
-class AzureRMRedisCacheInfo(AzureRMModuleBase):
- """Utility class to get Azure Cache for Redis facts"""
-
- def __init__(self):
-
- self.module_args = dict(
- name=dict(type='str'),
- resource_group=dict(
- type='str',
- required=True
- ),
- return_access_keys=dict(
- type='bool',
- default=False
- ),
- tags=dict(type='list')
- )
-
- self.results = dict(
- changed=False,
- rediscaches=[]
- )
-
- self.name = None
- self.resource_group = None
- self.profile_name = None
- self.tags = None
-
- self._client = None
-
- super(AzureRMRedisCacheInfo, self).__init__(
- derived_arg_spec=self.module_args,
- supports_tags=False,
- facts_module=True
- )
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_rediscache_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_rediscache_facts' module has been renamed to 'azure_rm_rediscache_info'", version='2.13')
-
- for key in self.module_args:
- setattr(self, key, kwargs[key])
-
- # get management client
- self._client = self.get_mgmt_svc_client(RedisManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2018-03-01')
-
- if self.name:
- self.results['rediscaches'] = self.get_item()
- else:
- self.results['rediscaches'] = self.list_by_resourcegroup()
-
- return self.results
-
- def get_item(self):
- """Get a single Azure Cache for Redis"""
-
- self.log('Get properties for {0}'.format(self.name))
-
- item = None
- result = []
-
- try:
- item = self._client.redis.get(resource_group_name=self.resource_group, name=self.name)
- except CloudError:
- pass
-
- if item and self.has_tags(item.tags, self.tags):
- result = [self.serialize_rediscache(item)]
-
- return result
-
- def list_by_resourcegroup(self):
- """Get all Azure Cache for Redis within a resource group"""
-
- self.log('List all Azure Cache for Redis within a resource group')
-
- try:
- response = self._client.redis.list_by_resource_group(self.resource_group)
- except CloudError as exc:
- self.fail('Failed to list all items - {0}'.format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.serialize_rediscache(item))
-
- return results
-
- def list_keys(self):
- """List Azure Cache for Redis keys"""
-
- self.log('List keys for {0}'.format(self.name))
-
- item = None
-
- try:
- item = self._client.redis.list_keys(resource_group_name=self.resource_group, name=self.name)
- except CloudError as exc:
- self.fail("Failed to list redis keys of {0} - {1}".format(self.name, str(exc)))
-
- return item
-
- def serialize_rediscache(self, rediscache):
- '''
- Convert an Azure Cache for Redis object to dict.
- :param rediscache: Azure Cache for Redis object
- :return: dict
- '''
- new_result = dict(
- id=rediscache.id,
- resource_group=re.sub('\\/.*', '', re.sub('.*resourceGroups\\/', '', rediscache.id)),
- name=rediscache.name,
- location=rediscache.location,
- provisioning_state=rediscache.provisioning_state,
- configuration=rediscache.redis_configuration,
- tenant_settings=rediscache.tenant_settings,
- shard_count=rediscache.shard_count,
- enable_non_ssl_port=rediscache.enable_non_ssl_port,
- static_ip=rediscache.static_ip,
- subnet=rediscache.subnet_id,
- host_name=rediscache.host_name,
- tags=rediscache.tags
- )
-
- if rediscache.sku:
- new_result['sku'] = dict(
- name=rediscache.sku.name.lower(),
- size=rediscache.sku.family + str(rediscache.sku.capacity)
- )
- if self.return_access_keys:
- access_keys = self.list_keys()
- if access_keys:
- new_result['access_keys'] = dict(
- primary=access_keys.primary_key,
- secondary=access_keys.secondary_key
- )
- return new_result
-
-
-def main():
- """Main module execution code path"""
-
- AzureRMRedisCacheInfo()
-
-
-if __name__ == '__main__':
- main()
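serialize_rediscache() above derives the resource group name from the full resource ID with two nested re.sub() calls. A small standalone illustration of that extraction (the sample ID is made up):

    import re

    resource_id = ("/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
                   "/resourceGroups/myResourceGroup/providers/Microsoft.Cache/Redis/myRedis")

    # Strip everything up to and including 'resourceGroups/', then everything after the next '/'.
    resource_group = re.sub('\\/.*', '', re.sub('.*resourceGroups\\/', '', resource_id))
    print(resource_group)  # -> myResourceGroup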
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_rediscachefirewallrule.py b/lib/ansible/modules/cloud/azure/azure_rm_rediscachefirewallrule.py
deleted file mode 100644
index a8ba7816ee..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_rediscachefirewallrule.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yunge Zhu, <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_rediscachefirewallrule
-version_added: "2.8"
-short_description: Manage Azure Cache for Redis Firewall rules
-description:
- - Create, update and delete Azure Cache for Redis Firewall rules.
-
-options:
- resource_group:
- description:
- - Name of the resource group to which the resource belongs.
- required: True
- cache_name:
- description:
- - Name of the Azure Cache for Redis.
- required: True
- name:
- description:
- - Name of the Firewall rule.
- required: True
- start_ip_address:
- description:
- - The start IP address of the Azure Cache for Redis Firewall rule. Must be IPv4 format.
- - Required when creating a Firewall rule.
- end_ip_address:
- description:
- - The end IP address of the Azure Cache for Redis Firewall rule. Must be IPv4 format.
- - Required when creating a Firewall rule.
- state:
- description:
- - Assert the state of the Firewall rule of Azure Cache for Redis.
- - Use C(present) to create or update Firewall rule of Azure Cache for Redis and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yunge Zhu(@yungezz)
-
-'''
-
-EXAMPLES = '''
- - name: Create a Firewall rule for Azure Cache for Redis
- azure_rm_rediscachefirewallrule:
- resource_group: myResourceGroup
- cache_name: myRedisCache
- name: myRule
- start_ip_address: 192.168.1.1
- end_ip_address: 192.168.1.4
-
- - name: Update a Firewall rule for Azure Cache for Redis
- azure_rm_rediscachefirewallrule:
- resource_group: myResourceGroup
- cache_name: myRedisCache
- name: myRule
- end_ip_address: 192.168.1.5
-'''
-
-RETURN = '''
-id:
- description:
- - Id of the Azure Cache for Redis.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Cache/redis/myRedis/firewallRules/myRule"
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrestazure.azure_operation import AzureOperationPoller
- from msrest.serialization import Model
- from azure.mgmt.redis import RedisManagementClient
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def firewall_rule_to_dict(rule):
- return dict(
- id=rule.id,
- name=rule.name,
- start_ip_address=rule.start_ip,
- end_ip_address=rule.end_ip,
- type=rule.type
- )
-
-
-class Actions:
- NoAction, CreateUpdate, Delete = range(3)
-
-
-class AzureRMRedisCacheFirewallRule(AzureRMModuleBase):
- """Configuration class for an Azure RM Cache for Redis Firewall Rule resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- cache_name=dict(
- type='str',
- required=True
- ),
- start_ip_address=dict(
- type='str'
- ),
- end_ip_address=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self._client = None
-
- self.resource_group = None
- self.name = None
- self.cache_name = None
-
- self.start_ip_address = None
- self.end_ip_address = None
-
- self.results = dict(
- changed=False,
- id=None
- )
- self.state = None
-
- self.to_do = Actions.NoAction
-
- super(AzureRMRedisCacheFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()):
- setattr(self, key, kwargs[key])
-
- old_response = None
- response = None
-
- # get management client
- self._client = self.get_mgmt_svc_client(RedisManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2018-03-01')
-
- # check if the firewall rule exists
- old_response = self.get()
-
- if old_response:
- self.results['id'] = old_response['id']
-
- if self.state == 'present':
- # if firewall rule not exists
- if not old_response:
- self.log("Firewall Rule of Azure Cache for Redis doesn't exist")
-
- self.to_do = Actions.CreateUpdate
-
- else:
- # redis exists already, do update
- self.log("Firewall Rule of Azure Cache for Redis already exists")
-
- if self.start_ip_address is None:
- self.start_ip_address = old_response['start_ip_address']
- if self.end_ip_address is None:
- self.end_ip_address = old_response['end_ip_address']
-
- # check if update
- if self.check_update(old_response):
- self.to_do = Actions.CreateUpdate
-
- elif self.state == 'absent':
- if old_response:
- self.log("Delete Firewall Rule of Azure Cache for Redis")
- self.results['id'] = old_response['id']
- self.to_do = Actions.Delete
- else:
- self.results['changed'] = False
- self.log("Azure Cache for Redis {0} doesn't exist.".format(self.name))
-
- if self.to_do == Actions.CreateUpdate:
- self.log('Need to Create/Update Firewall rule of Azure Cache for Redis')
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- response = self.create_or_update()
- self.results['id'] = response['id']
-
- if self.to_do == Actions.Delete:
- self.log('Delete Firewall rule of Azure Cache for Redis')
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete()
- self.log('Firewall rule of Azure Cache for Redis deleted')
-
- return self.results
-
- def check_update(self, existing):
- if self.start_ip_address and self.start_ip_address != existing['start_ip_address']:
- self.log("start_ip_address diff: origin {0} / update {1}".format(existing['start_ip_address'], self.start_ip_address))
- return True
- if self.end_ip_address and self.end_ip_address != existing['end_ip_address']:
- self.log("end_ip_address diff: origin {0} / update {1}".format(existing['end_ip_address'], self.end_ip_address))
- return True
- return False
-
- def create_or_update(self):
- '''
- Creates Firewall rule of Azure Cache for Redis with the specified configuration.
-
- :return: deserialized Firewall rule of Azure Cache for Redis state dictionary
- '''
- self.log(
- "Creating Firewall rule of Azure Cache for Redis {0}".format(self.name))
-
- try:
- response = self._client.firewall_rules.create_or_update(resource_group_name=self.resource_group,
- cache_name=self.cache_name,
- rule_name=self.name,
- start_ip=self.start_ip_address,
- end_ip=self.end_ip_address)
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create/update Firewall rule of Azure Cache for Redis.')
- self.fail(
- "Error creating/updating Firewall rule of Azure Cache for Redis: {0}".format(str(exc)))
- return firewall_rule_to_dict(response)
-
- def delete(self):
- '''
- Deletes specified Firewall rule of Azure Cache for Redis in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the Firewall rule of Azure Cache for Redis {0}".format(self.name))
- try:
- response = self._client.firewall_rules.delete(resource_group_name=self.resource_group,
- rule_name=self.name,
- cache_name=self.cache_name)
- except CloudError as e:
- self.log('Error attempting to delete the Firewall rule of Azure Cache for Redis.')
- self.fail(
- "Error deleting the Firewall rule of Azure Cache for Redis: {0}".format(str(e)))
- return True
-
- def get(self):
- '''
- Gets the properties of the specified Firewall rule of Azure Cache for Redis.
-
- :return: Azure Cache for Redis Firewall Rule instance state dictionary
- '''
- self.log("Checking if the Firewall Rule {0} is present".format(self.name))
-
- response = None
-
- try:
- response = self._client.firewall_rules.get(resource_group_name=self.resource_group,
- rule_name=self.name,
- cache_name=self.cache_name)
-
- self.log("Response : {0}".format(response))
- self.log("Redis Firewall Rule : {0} found".format(response.name))
- return firewall_rule_to_dict(response)
-
- except CloudError as ex:
- self.log("Didn't find Azure Redis Firewall rule {0} in resource group {1}".format(
- self.name, self.resource_group))
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMRedisCacheFirewallRule()
-
-
-if __name__ == '__main__':
- main()
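check_update() above implements the module's idempotency rule: parameters that were not supplied inherit the existing values, and a change is reported only when a supplied value actually differs. A minimal sketch of that rule outside the module (function and variable names are illustrative):

    def needs_update(desired, existing):
        """Return True if any explicitly supplied desired value differs from the existing one."""
        return any(value is not None and value != existing.get(key)
                   for key, value in desired.items())

    existing_rule = {'start_ip_address': '192.168.1.1', 'end_ip_address': '192.168.1.4'}
    print(needs_update({'start_ip_address': None, 'end_ip_address': '192.168.1.5'}, existing_rule))  # True
    print(needs_update({'start_ip_address': None, 'end_ip_address': '192.168.1.4'}, existing_rule))  # False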
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_resource.py b/lib/ansible/modules/cloud/azure/azure_rm_resource.py
deleted file mode 100644
index 6ea3e3bb9b..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_resource.py
+++ /dev/null
@@ -1,427 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_resource
-version_added: "2.6"
-short_description: Create any Azure resource
-description:
- - Create, update or delete any Azure resource using Azure REST API.
- - This module gives access to resources that are not supported via Ansible modules.
- - Refer to U(https://docs.microsoft.com/en-us/rest/api/) regarding details related to specific resource REST API.
-
-options:
- url:
- description:
- - Azure RM Resource URL.
- api_version:
- description:
- - Specific API version to be used.
- provider:
- description:
- - Provider type.
- - Required if URL is not specified.
- resource_group:
- description:
- - Resource group to be used.
- - Required if URL is not specified.
- resource_type:
- description:
- - Resource type.
- - Required if URL is not specified.
- resource_name:
- description:
- - Resource name.
- - Required if URL is not specified.
- subresource:
- description:
- - List of subresources.
- suboptions:
- namespace:
- description:
- - Subresource namespace.
- type:
- description:
- - Subresource type.
- name:
- description:
- - Subresource name.
- body:
- description:
- - The body of the HTTP request/response to the web service.
- method:
- description:
- - The HTTP method of the request or response. It must be uppercase.
- choices:
- - GET
- - PUT
- - POST
- - HEAD
- - PATCH
- - DELETE
- - MERGE
- default: "PUT"
- status_code:
- description:
- - A valid, numeric, HTTP status code that signifies success of the request. It can also be a comma-separated list of status codes.
- type: list
- default: [ 200, 201, 202 ]
- idempotency:
- description:
- - If enabled, idempotency check will be done by using I(method=GET) first and then comparing with I(body).
- default: no
- type: bool
- polling_timeout:
- description:
- - Timeout (in seconds) when polling a long-running operation for completion.
- default: 0
- type: int
- version_added: "2.8"
- polling_interval:
- description:
- - Interval (in seconds) between polls of a long-running operation.
- default: 60
- type: int
- version_added: "2.8"
- state:
- description:
- - Assert the state of the resource. Use C(present) to create or update resource or C(absent) to delete resource.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Update scaleset info using azure_rm_resource
- azure_rm_resource:
- resource_group: myResourceGroup
- provider: compute
- resource_type: virtualmachinescalesets
- resource_name: myVmss
- api_version: "2017-12-01"
- body: { body }
-'''
-
-RETURN = '''
-response:
- description:
- - Response specific to resource type.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- type: str
- returned: always
- sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Storage/storageAccounts/staccb57dc95183"
- kind:
- description:
- - The kind of storage.
- type: str
- returned: always
- sample: Storage
- location:
- description:
- - The resource location, defaults to location of the resource group.
- type: str
- returned: always
- sample: eastus
- name:
- description:
- - The storage account name.
- type: str
- returned: always
- sample: staccb57dc95183
- properties:
- description:
- - The storage account's related properties.
- type: dict
- returned: always
- sample: {
- "creationTime": "2019-06-13T06:34:33.0996676Z",
- "encryption": {
- "keySource": "Microsoft.Storage",
- "services": {
- "blob": {
- "enabled": true,
- "lastEnabledTime": "2019-06-13T06:34:33.1934074Z"
- },
- "file": {
- "enabled": true,
- "lastEnabledTime": "2019-06-13T06:34:33.1934074Z"
- }
- }
- },
- "networkAcls": {
- "bypass": "AzureServices",
- "defaultAction": "Allow",
- "ipRules": [],
- "virtualNetworkRules": []
- },
- "primaryEndpoints": {
- "blob": "https://staccb57dc95183.blob.core.windows.net/",
- "file": "https://staccb57dc95183.file.core.windows.net/",
- "queue": "https://staccb57dc95183.queue.core.windows.net/",
- "table": "https://staccb57dc95183.table.core.windows.net/"
- },
- "primaryLocation": "eastus",
- "provisioningState": "Succeeded",
- "secondaryLocation": "westus",
- "statusOfPrimary": "available",
- "statusOfSecondary": "available",
- "supportsHttpsTrafficOnly": false
- }
- sku:
- description:
- - The storage account SKU.
- type: dict
- returned: always
- sample: {
- "name": "Standard_GRS",
- "tier": "Standard"
- }
- tags:
- description:
- - Resource tags.
- type: dict
- returned: always
- sample: { 'key1': 'value1' }
- type:
- description:
- - The resource type.
- type: str
- returned: always
- sample: "Microsoft.Storage/storageAccounts"
-
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.azure_rm_common_rest import GenericRestClient
-from ansible.module_utils.common.dict_transformations import dict_merge
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.service_client import ServiceClient
- from msrestazure.tools import resource_id, is_valid_resource_id
- import json
-
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMResource(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- url=dict(
- type='str'
- ),
- provider=dict(
- type='str',
- ),
- resource_group=dict(
- type='str',
- ),
- resource_type=dict(
- type='str',
- ),
- resource_name=dict(
- type='str',
- ),
- subresource=dict(
- type='list',
- default=[]
- ),
- api_version=dict(
- type='str'
- ),
- method=dict(
- type='str',
- default='PUT',
- choices=["GET", "PUT", "POST", "HEAD", "PATCH", "DELETE", "MERGE"]
- ),
- body=dict(
- type='raw'
- ),
- status_code=dict(
- type='list',
- default=[200, 201, 202]
- ),
- idempotency=dict(
- type='bool',
- default=False
- ),
- polling_timeout=dict(
- type='int',
- default=0
- ),
- polling_interval=dict(
- type='int',
- default=60
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False,
- response=None
- )
- self.mgmt_client = None
- self.url = None
- self.api_version = None
- self.provider = None
- self.resource_group = None
- self.resource_type = None
- self.resource_name = None
- self.subresource_type = None
- self.subresource_name = None
- self.subresource = []
- self.method = None
- self.status_code = []
- self.idempotency = False
- self.polling_timeout = None
- self.polling_interval = None
- self.state = None
- self.body = None
- super(AzureRMResource, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if self.state == 'absent':
- self.method = 'DELETE'
- self.status_code.append(204)
-
- if self.url is None:
- orphan = None
- rargs = dict()
- rargs['subscription'] = self.subscription_id
- rargs['resource_group'] = self.resource_group
- if not (self.provider is None or self.provider.lower().startswith('microsoft.')):
- rargs['namespace'] = "Microsoft." + self.provider
- else:
- rargs['namespace'] = self.provider
-
- if self.resource_type is not None and self.resource_name is not None:
- rargs['type'] = self.resource_type
- rargs['name'] = self.resource_name
- for i in range(len(self.subresource)):
- resource_ns = self.subresource[i].get('namespace', None)
- resource_type = self.subresource[i].get('type', None)
- resource_name = self.subresource[i].get('name', None)
- if resource_type is not None and resource_name is not None:
- rargs['child_namespace_' + str(i + 1)] = resource_ns
- rargs['child_type_' + str(i + 1)] = resource_type
- rargs['child_name_' + str(i + 1)] = resource_name
- else:
- orphan = resource_type
- else:
- orphan = self.resource_type
-
- self.url = resource_id(**rargs)
-
- if orphan is not None:
- self.url += '/' + orphan
-
- # if api_version was not specified, get latest one
- if not self.api_version:
- try:
- # extract provider and resource type
- if "/providers/" in self.url:
- provider = self.url.split("/providers/")[1].split("/")[0]
- resourceType = self.url.split(provider + "/")[1].split("/")[0]
- url = "/subscriptions/" + self.subscription_id + "/providers/" + provider
- api_versions = json.loads(self.mgmt_client.query(url, "GET", {'api-version': '2015-01-01'}, None, None, [200], 0, 0).text)
- for rt in api_versions['resourceTypes']:
- if rt['resourceType'].lower() == resourceType.lower():
- self.api_version = rt['apiVersions'][0]
- break
- else:
- # if there's no provider in API version, assume Microsoft.Resources
- self.api_version = '2018-05-01'
- if not self.api_version:
- self.fail("Couldn't find api version for {0}/{1}".format(provider, resourceType))
- except Exception as exc:
- self.fail("Failed to obtain API version: {0}".format(str(exc)))
-
- query_parameters = {}
- query_parameters['api-version'] = self.api_version
-
- header_parameters = {}
- header_parameters['Content-Type'] = 'application/json; charset=utf-8'
-
- needs_update = True
- response = None
-
- if self.idempotency:
- original = self.mgmt_client.query(self.url, "GET", query_parameters, None, None, [200, 404], 0, 0)
-
- if original.status_code == 404:
- if self.state == 'absent':
- needs_update = False
- else:
- try:
- response = json.loads(original.text)
- needs_update = (dict_merge(response, self.body) != response)
- except Exception:
- pass
-
- if needs_update:
- response = self.mgmt_client.query(self.url,
- self.method,
- query_parameters,
- header_parameters,
- self.body,
- self.status_code,
- self.polling_timeout,
- self.polling_interval)
- if self.state == 'present':
- try:
- response = json.loads(response.text)
- except Exception:
- response = response.text
- else:
- response = None
-
- self.results['response'] = response
- self.results['changed'] = needs_update
-
- return self.results
-
-
-def main():
- AzureRMResource()
-
-
-if __name__ == '__main__':
- main()
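When I(idempotency) is enabled, exec_module() above GETs the current resource and treats the request as a no-op if merging the desired body into that response changes nothing. A small sketch of that comparison; the real module uses dict_merge from ansible.module_utils.common.dict_transformations, and the recursive merge below is only an illustrative stand-in:

    def dict_merge(base, overlay):
        """Recursively overlay 'overlay' onto a copy of 'base' (illustrative stand-in)."""
        merged = dict(base)
        for key, value in overlay.items():
            if isinstance(value, dict) and isinstance(merged.get(key), dict):
                merged[key] = dict_merge(merged[key], value)
            else:
                merged[key] = value
        return merged

    current = {'location': 'eastus', 'properties': {'supportsHttpsTrafficOnly': False}}
    desired = {'properties': {'supportsHttpsTrafficOnly': False}}

    # No effective difference, so the module would skip the PUT and report changed=False.
    print(dict_merge(current, desired) != current)  # False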
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_resource_info.py b/lib/ansible/modules/cloud/azure/azure_rm_resource_info.py
deleted file mode 100644
index 354cd79578..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_resource_info.py
+++ /dev/null
@@ -1,431 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_resource_info
-version_added: "2.9"
-short_description: Generic facts of Azure resources
-description:
- - Obtain facts of any resource using Azure REST API.
- - This module gives access to resources that are not supported via Ansible modules.
- - Refer to U(https://docs.microsoft.com/en-us/rest/api/) regarding details related to specific resource REST API.
-
-options:
- url:
- description:
- - Azure RM Resource URL.
- api_version:
- description:
- - Specific API version to be used.
- provider:
- description:
- - Provider type, should be specified when no URL is given.
- resource_group:
- description:
- - Resource group to be used.
- - Required if URL is not specified.
- resource_type:
- description:
- - Resource type.
- resource_name:
- description:
- - Resource name.
- subresource:
- description:
- - List of subresources.
- suboptions:
- namespace:
- description:
- - Subresource namespace.
- type:
- description:
- - Subresource type.
- name:
- description:
- - Subresource name.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get scaleset info
- azure_rm_resource_info:
- resource_group: myResourceGroup
- provider: compute
- resource_type: virtualmachinescalesets
- resource_name: myVmss
- api_version: "2017-12-01"
-
- - name: Query all the resources in the resource group
- azure_rm_resource_info:
- resource_group: "{{ resource_group }}"
- resource_type: resources
-'''
-
-RETURN = '''
-response:
- description:
- - Response specific to resource type.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Id of the Azure resource.
- type: str
- returned: always
- sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/virtualMachines/myVM"
- location:
- description:
- - Resource location.
- type: str
- returned: always
- sample: eastus
- name:
- description:
- - Resource name.
- type: str
- returned: always
- sample: myVM
- properties:
- description:
- - Specifies the virtual machine's property.
- type: complex
- returned: always
- contains:
- diagnosticsProfile:
- description:
- - Specifies the boot diagnostic settings state.
- type: complex
- returned: always
- contains:
- bootDiagnostics:
- description:
- - A debugging feature used to view console output and a screenshot to diagnose VM status.
- type: dict
- returned: always
- sample: {
- "enabled": true,
- "storageUri": "https://vxisurgdiag.blob.core.windows.net/"
- }
- hardwareProfile:
- description:
- - Specifies the hardware settings for the virtual machine.
- type: dict
- returned: always
- sample: {
- "vmSize": "Standard_D2s_v3"
- }
- networkProfile:
- description:
- - Specifies the network interfaces of the virtual machine.
- type: complex
- returned: always
- contains:
- networkInterfaces:
- description:
- - Describes a network interface reference.
- type: list
- returned: always
- sample:
- - {
- "id": "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/networkInterfaces/myvm441"
- }
- osProfile:
- description:
- - Specifies the operating system settings for the virtual machine.
- type: complex
- returned: always
- contains:
- adminUsername:
- description:
- - Specifies the name of the administrator account.
- type: str
- returned: always
- sample: azureuser
- allowExtensionOperations:
- description:
- - Specifies whether extension operations should be allowed on the virtual machine.
- - This may only be set to False when no extensions are present on the virtual machine.
- type: bool
- returned: always
- sample: true
- computerName:
- description:
- - Specifies the host OS name of the virtual machine.
- type: str
- returned: always
- sample: myVM
- requireGuestProvisionSignal:
- description:
- - Specifies whether the host requires a guest provision signal.
- type: bool
- returned: always
- sample: true
- secrets:
- description:
- - Specifies set of certificates that should be installed onto the virtual machine.
- type: list
- returned: always
- sample: []
- linuxConfiguration:
- description:
- - Specifies the Linux operating system settings on the virtual machine.
- type: dict
- returned: when OS type is Linux
- sample: {
- "disablePasswordAuthentication": false,
- "provisionVMAgent": true
- }
- provisioningState:
- description:
- - The provisioning state.
- type: str
- returned: always
- sample: Succeeded
- vmID:
- description:
- - Specifies the VM unique ID, which is a 128-bit identifier that is encoded and stored in all Azure IaaS VMs SMBIOS.
- - It can be read using platform BIOS commands.
- type: str
- returned: always
- sample: "eb86d9bb-6725-4787-a487-2e497d5b340c"
- storageProfile:
- description:
- - Specifies the storage account type for the managed disk.
- type: complex
- returned: always
- contains:
- dataDisks:
- description:
- - Specifies the parameters that are used to add a data disk to virtual machine.
- type: list
- returned: always
- sample:
- - {
- "caching": "None",
- "createOption": "Attach",
- "diskSizeGB": 1023,
- "lun": 2,
- "managedDisk": {
- "id": "/subscriptions/xxxx....xxxx/resourceGroups/V-XISURG/providers/Microsoft.Compute/disks/testdisk2",
- "storageAccountType": "StandardSSD_LRS"
- },
- "name": "testdisk2"
- }
- - {
- "caching": "None",
- "createOption": "Attach",
- "diskSizeGB": 1023,
- "lun": 1,
- "managedDisk": {
- "id": "/subscriptions/xxxx...xxxx/resourceGroups/V-XISURG/providers/Microsoft.Compute/disks/testdisk3",
- "storageAccountType": "StandardSSD_LRS"
- },
- "name": "testdisk3"
- }
-
- imageReference:
- description:
- - Specifies information about the image to use.
- type: dict
- returned: always
- sample: {
- "offer": "UbuntuServer",
- "publisher": "Canonical",
- "sku": "18.04-LTS",
- "version": "latest"
- }
- osDisk:
- description:
- - Specifies information about the operating system disk used by the virtual machine.
- type: dict
- returned: always
- sample: {
- "caching": "ReadWrite",
- "createOption": "FromImage",
- "diskSizeGB": 30,
- "managedDisk": {
- "id": "/subscriptions/xxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Compute/disks/myVM_disk1_xxx",
- "storageAccountType": "Premium_LRS"
- },
- "name": "myVM_disk1_xxx",
- "osType": "Linux"
- }
- type:
- description:
- - The type of identity used for the virtual machine.
- type: str
- returned: always
- sample: "Microsoft.Compute/virtualMachines"
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.azure_rm_common_rest import GenericRestClient
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.service_client import ServiceClient
- from msrestazure.tools import resource_id, is_valid_resource_id
- import json
-
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMResourceInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- url=dict(
- type='str'
- ),
- provider=dict(
- type='str'
- ),
- resource_group=dict(
- type='str'
- ),
- resource_type=dict(
- type='str'
- ),
- resource_name=dict(
- type='str'
- ),
- subresource=dict(
- type='list',
- default=[]
- ),
- api_version=dict(
- type='str'
- )
- )
- # store the results of the module operation
- self.results = dict(
- response=[]
- )
- self.mgmt_client = None
- self.url = None
- self.api_version = None
- self.provider = None
- self.resource_group = None
- self.resource_type = None
- self.resource_name = None
- self.subresource = []
- super(AzureRMResourceInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_resource_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_resource_facts' module has been renamed to 'azure_rm_resource_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if self.url is None:
- orphan = None
- rargs = dict()
- rargs['subscription'] = self.subscription_id
- rargs['resource_group'] = self.resource_group
- if not (self.provider is None or self.provider.lower().startswith('microsoft.')):
- rargs['namespace'] = "Microsoft." + self.provider
- else:
- rargs['namespace'] = self.provider
-
- if self.resource_type is not None and self.resource_name is not None:
- rargs['type'] = self.resource_type
- rargs['name'] = self.resource_name
- for i in range(len(self.subresource)):
- resource_ns = self.subresource[i].get('namespace', None)
- resource_type = self.subresource[i].get('type', None)
- resource_name = self.subresource[i].get('name', None)
- if resource_type is not None and resource_name is not None:
- rargs['child_namespace_' + str(i + 1)] = resource_ns
- rargs['child_type_' + str(i + 1)] = resource_type
- rargs['child_name_' + str(i + 1)] = resource_name
- else:
- orphan = resource_type
- else:
- orphan = self.resource_type
-
- self.url = resource_id(**rargs)
-
- if orphan is not None:
- self.url += '/' + orphan
-
- # if api_version was not specified, get latest one
- if not self.api_version:
- try:
- # extract provider and resource type
- if "/providers/" in self.url:
- provider = self.url.split("/providers/")[1].split("/")[0]
- resourceType = self.url.split(provider + "/")[1].split("/")[0]
- url = "/subscriptions/" + self.subscription_id + "/providers/" + provider
- api_versions = json.loads(self.mgmt_client.query(url, "GET", {'api-version': '2015-01-01'}, None, None, [200], 0, 0).text)
- for rt in api_versions['resourceTypes']:
- if rt['resourceType'].lower() == resourceType.lower():
- self.api_version = rt['apiVersions'][0]
- break
- else:
- # if there's no provider in API version, assume Microsoft.Resources
- self.api_version = '2018-05-01'
- if not self.api_version:
- self.fail("Couldn't find api version for {0}/{1}".format(provider, resourceType))
- except Exception as exc:
- self.fail("Failed to obtain API version: {0}".format(str(exc)))
-
- self.results['url'] = self.url
-
- query_parameters = {}
- query_parameters['api-version'] = self.api_version
-
- header_parameters = {}
- header_parameters['Content-Type'] = 'application/json; charset=utf-8'
- skiptoken = None
-
- while True:
- if skiptoken:
- query_parameters['skiptoken'] = skiptoken
- response = self.mgmt_client.query(self.url, "GET", query_parameters, header_parameters, None, [200, 404], 0, 0)
- try:
- response = json.loads(response.text)
- if isinstance(response, dict):
- if response.get('value'):
- self.results['response'] = self.results['response'] + response['value']
- skiptoken = response.get('nextLink')
- else:
- self.results['response'] = self.results['response'] + [response]
- except Exception as e:
- self.fail('Failed to parse response: ' + str(e))
- if not skiptoken:
- break
- return self.results
-
-
-def main():
- AzureRMResourceInfo()
-
-
-if __name__ == '__main__':
- main()
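exec_module() above pages through list responses by following the nextLink token, resending it as a skiptoken query parameter until no further page remains. A generic sketch of that pagination loop, assuming a hypothetical query(url, params) callable that returns an already-decoded JSON dict:

    def collect_all_pages(query, url):
        """Follow 'nextLink' tokens until every page of a list response has been gathered."""
        results = []
        params = {}
        while True:
            page = query(url, params)
            if isinstance(page, dict) and page.get('value') is not None:
                results.extend(page['value'])
                skiptoken = page.get('nextLink')
            else:
                results.append(page)
                skiptoken = None
            if not skiptoken:
                break
            params['skiptoken'] = skiptoken
        return results

    pages = [{'value': [1, 2], 'nextLink': 'token-2'}, {'value': [3]}]

    def fake_query(url, params):
        # Serve the second page once a skiptoken has been supplied.
        return pages[1] if params.get('skiptoken') else pages[0]

    print(collect_all_pages(fake_query, '/subscriptions/xxx/resources'))  # [1, 2, 3]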
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_resourcegroup.py b/lib/ansible/modules/cloud/azure/azure_rm_resourcegroup.py
deleted file mode 100644
index f39a792744..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_resourcegroup.py
+++ /dev/null
@@ -1,291 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_resourcegroup
-version_added: "2.1"
-short_description: Manage Azure resource groups
-description:
- - Create, update and delete a resource group.
-options:
- force_delete_nonempty:
- description:
- - Remove a resource group and all associated resources.
- - Use with I(state=absent) to delete a resource group that contains resources.
- type: bool
- aliases:
- - force
- default: 'no'
- location:
- description:
- - Azure location for the resource group. Required when creating a new resource group.
- - Cannot be changed once resource group is created.
- name:
- description:
- - Name of the resource group.
- required: true
- state:
- description:
- - Assert the state of the resource group. Use C(present) to create or update and C(absent) to delete.
- - When C(absent) a resource group containing resources will not be removed unless the I(force) option is used.
- default: present
- choices:
- - absent
- - present
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Chris Houseknecht (@chouseknecht)
- - Matt Davis (@nitzmahone)
-
-'''
-
-EXAMPLES = '''
- - name: Create a resource group
- azure_rm_resourcegroup:
- name: myResourceGroup
- location: westus
- tags:
- testing: testing
- delete: never
-
- - name: Delete a resource group
- azure_rm_resourcegroup:
- name: myResourceGroup
- state: absent
-
- - name: Delete a resource group including resources it contains
- azure_rm_resourcegroup:
- name: myResourceGroup
- force_delete_nonempty: yes
- state: absent
-'''
-RETURN = '''
-contains_resources:
- description:
- - Whether or not the resource group contains associated resources.
- returned: always
- type: bool
- sample: True
-state:
- description:
- - Current state of the resource group.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup"
- location:
- description:
- - Azure location for the resource group.
- returned: always
- type: str
- sample: westus
- name:
- description:
- - The resource group name.
- returned: always
- type: str
- sample: Testing
- provisioning_state:
- description:
- - Provisioning state of the resource group.
- returned: always
- type: str
- sample: Succeeded
- tags:
- description:
- - The resource group's tags.
- returned: always
- type: dict
- sample: {
- "delete": "on-exit",
- "testing": "no"
- }
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, normalize_location_name
-
-
-def resource_group_to_dict(rg):
- return dict(
- id=rg.id,
- name=rg.name,
- location=rg.location,
- tags=rg.tags,
- provisioning_state=rg.properties.provisioning_state
- )
-
-
-class AzureRMResourceGroup(AzureRMModuleBase):
-
- def __init__(self):
- self.module_arg_spec = dict(
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- location=dict(type='str'),
- force_delete_nonempty=dict(type='bool', default=False, aliases=['force'])
- )
-
- self.name = None
- self.state = None
- self.location = None
- self.tags = None
- self.force_delete_nonempty = None
-
- self.results = dict(
- changed=False,
- contains_resources=False,
- state=dict(),
- )
-
- super(AzureRMResourceGroup, self).__init__(self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- results = dict()
- changed = False
- rg = None
- contains_resources = False
-
- try:
- self.log('Fetching resource group {0}'.format(self.name))
- rg = self.rm_client.resource_groups.get(self.name)
- self.check_provisioning_state(rg, self.state)
- contains_resources = self.resources_exist()
-
- results = resource_group_to_dict(rg)
- if self.state == 'absent':
- self.log("CHANGED: resource group {0} exists but requested state is 'absent'".format(self.name))
- changed = True
- elif self.state == 'present':
- update_tags, results['tags'] = self.update_tags(results['tags'])
- self.log("update tags %s" % update_tags)
- self.log("new tags: %s" % str(results['tags']))
- if update_tags:
- changed = True
-
- if self.location and normalize_location_name(self.location) != results['location']:
- self.fail("Resource group '{0}' already exists in location '{1}' and cannot be "
- "moved.".format(self.name, results['location']))
- except CloudError:
- self.log('Resource group {0} does not exist'.format(self.name))
- if self.state == 'present':
- self.log("CHANGED: resource group {0} does not exist but requested state is "
- "'present'".format(self.name))
- changed = True
-
- self.results['changed'] = changed
- self.results['state'] = results
- self.results['contains_resources'] = contains_resources
-
- if self.check_mode:
- return self.results
-
- if changed:
- if self.state == 'present':
- if not rg:
- # Create resource group
- self.log("Creating resource group {0}".format(self.name))
- if not self.location:
- self.fail("Parameter error: location is required when creating a resource group.")
- if self.name_exists():
- self.fail("Error: a resource group with the name {0} already exists in your subscription."
- .format(self.name))
- params = self.rm_models.ResourceGroup(
- location=self.location,
- tags=self.tags
- )
- else:
- # Update resource group
- params = self.rm_models.ResourceGroup(
- location=results['location'],
- tags=results['tags']
- )
- self.results['state'] = self.create_or_update_resource_group(params)
- elif self.state == 'absent':
- if contains_resources and not self.force_delete_nonempty:
- self.fail("Error removing resource group {0}. Resources exist within the group. "
- "Use `force_delete_nonempty` to force delete. "
- "To list resources under {0}, use `azure_rm_resourcegroup_facts` module with `list_resources` option.".format(self.name))
- self.delete_resource_group()
-
- return self.results
-
- def create_or_update_resource_group(self, params):
- try:
- result = self.rm_client.resource_groups.create_or_update(self.name, params)
- except Exception as exc:
- self.fail("Error creating or updating resource group {0} - {1}".format(self.name, str(exc)))
- return resource_group_to_dict(result)
-
- def delete_resource_group(self):
- try:
- poller = self.rm_client.resource_groups.delete(self.name)
- self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error deleting resource group {0} - {1}".format(self.name, str(exc)))
-
- # The delete operation doesn't return anything.
- # If we got here, assume all is good
- self.results['state']['status'] = 'Deleted'
- return True
-
- def resources_exist(self):
- found = False
- try:
- response = self.rm_client.resources.list_by_resource_group(self.name)
- except AttributeError:
- response = self.rm_client.resource_groups.list_resources(self.name)
- except Exception as exc:
- self.fail("Error checking for resource existence in {0} - {1}".format(self.name, str(exc)))
-
- for item in response:
- found = True
- break
- return found
-
- def name_exists(self):
- try:
- exists = self.rm_client.resource_groups.check_existence(self.name)
- except Exception as exc:
- self.fail("Error checking for existence of name {0} - {1}".format(self.name, str(exc)))
- return exists
-
-
-def main():
- AzureRMResourceGroup()
-
-
-if __name__ == '__main__':
- main()
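exec_module() above decides whether anything needs to change before it touches Azure, and returns early under check mode so no create or delete is attempted. A condensed sketch of that ordering, with hypothetical do_create()/do_delete() callables standing in for the real API calls:

    def apply_state(state, exists, check_mode, do_create, do_delete):
        """Report whether a change is needed and only perform it outside check mode."""
        changed = (state == 'present' and not exists) or (state == 'absent' and exists)
        if not changed or check_mode:
            return changed
        if state == 'present':
            do_create()
        else:
            do_delete()
        return changed

    created = []
    print(apply_state('present', exists=False, check_mode=True,
                      do_create=lambda: created.append('rg'), do_delete=lambda: None))  # True
    print(created)  # [] - nothing is created while in check mode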
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_resourcegroup_info.py b/lib/ansible/modules/cloud/azure/azure_rm_resourcegroup_info.py
deleted file mode 100644
index bf1846313d..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_resourcegroup_info.py
+++ /dev/null
@@ -1,240 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_resourcegroup_info
-
-version_added: "2.1"
-
-short_description: Get resource group facts
-
-description:
- - Get facts for a specific resource group or all resource groups.
-
-options:
- name:
- description:
- - Limit results to a specific resource group.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- list_resources:
- description:
- - List all resources under the resource group.
- - Note that this adds network overhead for each resource group, so it is recommended to use it only when I(name) is set.
- version_added: "2.8"
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Chris Houseknecht (@chouseknecht)
- - Matt Davis (@nitzmahone)
-
-'''
-
-EXAMPLES = '''
- - name: Get facts for one resource group
- azure_rm_resourcegroup_info:
- name: myResourceGroup
-
- - name: Get facts for all resource groups
- azure_rm_resourcegroup_info:
-
- - name: Get facts by tags
- azure_rm_resourcegroup_info:
- tags:
- - testing
- - foo:bar
-
- - name: Get facts for one resource group including resources it contains
- azure_rm_resourcegroup_info:
- name: myResourceGroup
- list_resources: yes
-'''
-RETURN = '''
-azure_resourcegroups:
- description:
- - List of resource group dicts.
- returned: always
- type: list
- contains:
- id:
- description:
- - Resource id.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup"
- name:
- description:
- - Resource group name.
- returned: always
- type: str
- sample: foo
- tags:
- description:
- - Tags assigned to resource group.
- returned: always
- type: dict
- sample: { "tag": "value" }
- resources:
- description:
- - List of resources under the resource group.
- returned: when I(list_resources=yes).
- type: list
- contains:
- id:
- description:
- - Resource id.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVirtualMachine"
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: myVirtualMachine
- location:
- description:
- - Resource region.
- returned: always
- type: str
- sample: eastus
- type:
- description:
- - Resource type.
- returned: always
- type: str
- sample: "Microsoft.Compute/virtualMachines"
- tags:
- description:
- - Tags to assign to the managed disk.
- returned: always
- type: dict
- sample: { "tag": "value" }
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-
-AZURE_OBJECT_CLASS = 'ResourceGroup'
-
-
-class AzureRMResourceGroupInfo(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str'),
- tags=dict(type='list'),
- list_resources=dict(type='bool')
- )
-
- self.results = dict(
- changed=False,
- resourcegroups=[]
- )
-
- self.name = None
- self.tags = None
- self.list_resources = None
-
- super(AzureRMResourceGroupInfo, self).__init__(self.module_arg_spec,
- supports_tags=False,
- facts_module=True)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_resourcegroup_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_resourcegroup_facts' module has been renamed to 'azure_rm_resourcegroup_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name:
- result = self.get_item()
- else:
- result = self.list_items()
-
- if self.list_resources:
- for item in result:
- item['resources'] = self.list_by_rg(item['name'])
-
- if is_old_facts:
- self.results['ansible_facts'] = dict(
- azure_resourcegroups=result
- )
- self.results['resourcegroups'] = result
-
- return self.results
-
- def get_item(self):
- self.log('Get properties for {0}'.format(self.name))
- item = None
- result = []
-
- try:
- item = self.rm_client.resource_groups.get(self.name)
- except CloudError:
- pass
-
- if item and self.has_tags(item.tags, self.tags):
- result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
-
- return result
-
- def list_items(self):
- self.log('List all items')
- try:
- response = self.rm_client.resource_groups.list()
- except CloudError as exc:
- self.fail("Failed to list all items - {0}".format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
- return results
-
- def list_by_rg(self, name):
- self.log('List resources under resource group')
- results = []
- try:
- response = self.rm_client.resources.list_by_resource_group(name)
- while True:
- results.append(response.next().as_dict())
- except StopIteration:
- pass
- except CloudError as exc:
- self.fail('Error when listing resources under resource group {0}: {1}'.format(name, exc.message or str(exc)))
- return results
-
-
-def main():
- AzureRMResourceGroupInfo()
-
-
-if __name__ == '__main__':
- main()
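Both get_item() and list_items() above filter results with has_tags(), which accepts tag selectors written as 'key' or 'key:value'. A minimal sketch of that matching rule (not the exact AzureRMModuleBase implementation):

    def has_tags(resource_tags, selectors):
        """Return True if every selector ('key' or 'key:value') matches the resource tags."""
        if not selectors:
            return True
        resource_tags = resource_tags or {}
        for selector in selectors:
            key, _, value = selector.partition(':')
            if key not in resource_tags:
                return False
            if value and resource_tags[key] != value:
                return False
        return True

    print(has_tags({'testing': 'yes', 'foo': 'bar'}, ['testing', 'foo:bar']))  # True
    print(has_tags({'testing': 'yes'}, ['foo:bar']))                           # False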
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_roleassignment.py b/lib/ansible/modules/cloud/azure/azure_rm_roleassignment.py
deleted file mode 100644
index a084db65a0..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_roleassignment.py
+++ /dev/null
@@ -1,283 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yunge Zhu, (@yungezz)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_roleassignment
-version_added: "2.8"
-short_description: Manage Azure Role Assignment
-description:
- - Create and delete instance of Azure Role Assignment.
-
-options:
- name:
- description:
- - Unique name of role assignment.
- assignee_object_id:
- description:
- - The object id of the assignee. This maps to the ID inside the Active Directory.
- - It can point to a user, service principal or security group.
- - Required when creating role assignment.
- role_definition_id:
- description:
- - The role definition id used in the role assignment.
- - Required when creating role assignment.
- scope:
- description:
- - The scope of the role assignment to create.
- - For example, use /subscriptions/{subscription-id}/ for subscription.
- - /subscriptions/{subscription-id}/resourceGroups/{resource-group-name} for resource group.
- - /subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider}/{resource-type}/{resource-name} for resource.
- state:
- description:
- - Assert the state of the role assignment.
- - Use C(present) to create or update a role assignment and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yunge Zhu(@yungezz)
-
-'''
-
-EXAMPLES = '''
- - name: Create a role assignment
- azure_rm_roleassignment:
- scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- assignee_object_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- role_definition_id:
- "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleDefinitions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
-
- - name: Delete a role assignment
- azure_rm_roleassignment:
- name: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- state: absent
-
-'''
-
-RETURN = '''
-id:
- description:
- - Id of current role assignment.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleAssignments/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
-'''
-
-import uuid
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.azure_operation import AzureOperationPoller
- from msrest.serialization import Model
- from azure.mgmt.authorization import AuthorizationManagementClient
- from azure.mgmt.authorization.models import RoleAssignmentCreateParameters
-
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def roleassignment_to_dict(assignment):
- return dict(
- id=assignment.id,
- name=assignment.name,
- type=assignment.type,
- assignee_object_id=assignment.principal_id,
- role_definition_id=assignment.role_definition_id,
- scope=assignment.scope
- )
-
-
-class AzureRMRoleAssignment(AzureRMModuleBase):
- """Configuration class for an Azure RM Role Assignment"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- name=dict(
- type='str'
- ),
- scope=dict(
- type='str'
- ),
- assignee_object_id=dict(
- type='str'
- ),
- role_definition_id=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.name = None
- self.scope = None
- self.assignee_object_id = None
- self.role_definition_id = None
-
- self.results = dict(
- changed=False,
- id=None,
- )
- self.state = None
-
- self._client = None
-
- super(AzureRMRoleAssignment, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
-
- old_response = None
- response = None
-
- # get management client
- self._client = self.get_mgmt_svc_client(AuthorizationManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version="2018-01-01-preview")
-
- # build scope
- self.scope = self.build_scope()
-
- if self.name is None:
- self.name = str(uuid.uuid4())
-
- # get existing role assignment
- old_response = self.get_roleassignment()
-
- if old_response:
- self.results['id'] = old_response['id']
-
- if self.state == 'present':
- # check if the role assignment exists
- if not old_response:
- self.log("Role assignment doesn't exist in this scope")
-
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
- response = self.create_roleassignment()
- self.results['id'] = response['id']
-
- else:
- self.log("Role assignment already exists, not updatable")
- self.log('Result: {0}'.format(old_response))
-
- elif self.state == 'absent':
- if old_response:
- self.log("Delete role assignment")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_roleassignment(old_response['id'])
-
- self.log('role assignment deleted')
-
- else:
- self.fail("Role assignment {0} does not exist.".format(self.name))
-
- return self.results
-
- # build scope
- def build_scope(self):
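- # Fall back to the subscription-level scope ('/subscriptions/<id>') when no explicit scope was given.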
- subscription_scope = '/subscriptions/' + self.subscription_id
- if self.scope is None:
- return subscription_scope
- return self.scope
-
- def create_roleassignment(self):
- '''
- Creates role assignment.
-
- :return: deserialized role assignment
- '''
- self.log("Creating role assignment {0}".format(self.name))
-
- try:
- parameters = RoleAssignmentCreateParameters(role_definition_id=self.role_definition_id, principal_id=self.assignee_object_id)
- response = self._client.role_assignments.create(scope=self.scope,
- role_assignment_name=self.name,
- parameters=parameters)
-
- except CloudError as exc:
- self.log('Error attempting to create role assignment.')
- self.fail("Error creating role assignment: {0}".format(str(exc)))
- return roleassignment_to_dict(response)
-
- def delete_roleassignment(self, assignment_id):
- '''
- Deletes specified role assignment.
-
- :return: True
- '''
- self.log("Deleting the role assignment {0}".format(self.name))
- scope = self.build_scope()
- try:
- response = self._client.role_assignments.delete_by_id(role_id=assignment_id)
- except CloudError as e:
- self.log('Error attempting to delete the role assignment.')
- self.fail("Error deleting the role assignment: {0}".format(str(e)))
-
- return True
-
- def get_roleassignment(self):
- '''
- Gets the properties of the specified role assignment.
-
- :return: deserialized role assignment dictionary
- '''
- self.log("Checking if the role assignment {0} is present".format(self.name))
-
- response = None
-
- try:
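- # Identify an existing assignment by listing all assignments visible to the credentials and matching on both name and scope.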
- response = list(self._client.role_assignments.list())
- if response:
- for assignment in response:
- if assignment.name == self.name and assignment.scope == self.scope:
- return roleassignment_to_dict(assignment)
-
- except CloudError as ex:
- self.log("Didn't find role assignment {0} in scope {1}".format(self.name, self.scope))
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMRoleAssignment()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_roleassignment_info.py b/lib/ansible/modules/cloud/azure/azure_rm_roleassignment_info.py
deleted file mode 100644
index 7a9ee390a0..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_roleassignment_info.py
+++ /dev/null
@@ -1,280 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yunge Zhu, (@yungezz)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_roleassignment_info
-version_added: "2.9"
-short_description: Gets Azure Role Assignment facts
-description:
- - Gets facts of Azure Role Assignment.
-
-options:
- scope:
- description:
- - The scope that the role assignment applies to.
- - For example, use /subscriptions/{subscription-id}/ for a subscription.
- - /subscriptions/{subscription-id}/resourceGroups/{resourcegroup-name} for a resource group.
- - /subscriptions/{subscription-id}/resourceGroups/{resourcegroup-name}/providers/{resource-provider}/{resource-type}/{resource-name} for a resource.
- name:
- description:
- - Name of role assignment.
- - Mutually exclusive with I(assignee).
- assignee:
- description:
- - Object id of a user, group or service principal.
- - Mutually exclusive with I(name).
- role_definition_id:
- description:
- - Resource id of role definition.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yunge Zhu(@yungezz)
-
-'''
-
-EXAMPLES = '''
- - name: Get role assignments for specific service principal
- azure_rm_roleassignment_info:
- assignee: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-
- - name: Get role assignments for specific scope
- azure_rm_roleassignment_info:
- scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
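-
-  # Look up a single assignment by its GUID name within a scope (uses the same placeholder IDs as the examples above)
-  - name: Get role assignment by name
-    azure_rm_roleassignment_info:
-      scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-      name: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx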
-'''
-
-RETURN = '''
-roleassignments:
- description:
- - List of role assignments.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Id of role assignment.
- type: str
- returned: always
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleAssignments/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
- name:
- description:
- - Name of role assignment.
- type: str
- returned: always
- sample: myRoleAssignment
- type:
- description:
- - Type of role assignment.
- type: str
- returned: always
- sample: custom
- principal_id:
- description:
- - Principal Id of the role assigned to.
- type: str
- returned: always
- sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- role_definition_id:
- description:
- - Role definition id that was assigned to principal_id.
- type: str
- returned: always
- sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- scope:
- description:
- - The role assignment scope.
- type: str
- returned: always
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.serialization import Model
- from azure.mgmt.authorization import AuthorizationManagementClient
-
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def roleassignment_to_dict(assignment):
- return dict(
- id=assignment.id,
- name=assignment.name,
- type=assignment.type,
- principal_id=assignment.principal_id,
- role_definition_id=assignment.role_definition_id,
- scope=assignment.scope
- )
-
-
-class AzureRMRoleAssignmentInfo(AzureRMModuleBase):
-
- def __init__(self):
- self.module_arg_spec = dict(
- name=dict(
- type='str'
- ),
- scope=dict(
- type='str'
- ),
- assignee=dict(
- type='str'
- ),
- role_definition_id=dict(
- type='str'
- )
- )
-
- self.name = None
- self.scope = None
- self.assignee = None
- self.role_definition_id = None
-
- self.results = dict(
- changed=False
- )
-
- self._client = None
-
- mutually_exclusive = [['name', 'assignee']]
-
- super(AzureRMRoleAssignmentInfo, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_tags=False,
- mutually_exclusive=mutually_exclusive)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
- is_old_facts = self.module._name == 'azure_rm_roleassignment_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_roleassignment_facts' module has been renamed to 'azure_rm_roleassignment_info'", version='2.13')
-
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
-
- # get management client
- self._client = self.get_mgmt_svc_client(AuthorizationManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version="2018-01-01-preview")
-
- if self.name:
- self.results['roleassignments'] = self.get_by_name()
- elif self.assignee:
- self.results['roleassignments'] = self.get_by_assignee()
- elif self.scope:
- self.results['roleassignments'] = self.list_by_scope()
- else:
- self.fail("Please specify name, assignee or scope")
-
- return self.results
-
- def get_by_name(self):
- '''
- Gets the properties of the specified role assignment by name.
-
- :return: deserialized role assignment dictionary
- '''
- self.log("Gets role assignment {0} by name".format(self.name))
-
- results = []
-
- try:
- response = self._client.role_assignments.get(scope=self.scope, role_assignment_name=self.name)
-
- if response:
- response = roleassignment_to_dict(response)
-
- if self.role_definition_id:
- if self.role_definition_id == response['role_definition_id']:
- results = [response]
- else:
- results = [response]
-
- except CloudError as ex:
- self.log("Didn't find role assignment {0} in scope {1}".format(self.name, self.scope))
-
- return results
-
- def get_by_assignee(self):
- '''
- Gets the role assignments by assignee.
-
- :return: deserialized role assignment dictionary
- '''
- self.log("Gets role assignments by assignee {0}".format(self.assignee))
-
- results = []
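- # Build an OData filter so the service only returns assignments whose principalId matches the requested assignee.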
- filter = "principalId eq '{0}'".format(self.assignee)
- try:
- response = list(self._client.role_assignments.list(filter=filter))
-
- if response and len(response) > 0:
- response = [roleassignment_to_dict(a) for a in response]
-
- if self.role_definition_id:
- for r in response:
- if r['role_definition_id'] == self.role_definition_id:
- results.append(r)
- else:
- results = response
-
- except CloudError as ex:
- self.log("Didn't find role assignments to assignee {0}".format(self.assignee))
-
- return results
-
- def list_by_scope(self):
- '''
- Lists the role assignments by specific scope.
-
- :return: deserialized role assignment dictionary
- '''
- self.log("Lists role assignment by scope {0}".format(self.scope))
-
- results = []
- try:
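- # The atScope() filter returns assignments that apply at this scope or above it, excluding assignments created at narrower child scopes.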
- response = list(self._client.role_assignments.list_for_scope(scope=self.scope, filter='atScope()'))
-
- if response and len(response) > 0:
- response = [roleassignment_to_dict(a) for a in response]
-
- if self.role_definition_id:
- for r in response:
- if r['role_definition_id'] == self.role_definition_id:
- results.append(r)
- else:
- results = response
-
- except CloudError as ex:
- self.log("Didn't find role assignments to scope {0}".format(self.scope))
-
- return results
-
-
-def main():
- """Main execution"""
- AzureRMRoleAssignmentInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_roledefinition.py b/lib/ansible/modules/cloud/azure/azure_rm_roledefinition.py
deleted file mode 100644
index 30308eedc5..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_roledefinition.py
+++ /dev/null
@@ -1,402 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yunge Zhu, (@yungezz)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_roledefinition
-version_added: "2.8"
-short_description: Manage Azure Role Definition
-description:
- - Create, update and delete instance of Azure Role Definition.
-
-options:
- name:
- description:
- - Unique name of role definition.
- required: True
- permissions:
- description:
- - Set of role definition permissions.
- - See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
- suboptions:
- actions:
- description:
- - List of allowed actions.
- type: list
- not_actions:
- description:
- - List of denied actions.
- type: list
- data_actions:
- description:
- - List of allowed data actions.
- type: list
- not_data_actions:
- description:
- - List of denied data actions.
- type: list
- assignable_scopes:
- description:
- - List of assignable scopes of this definition.
- scope:
- description:
- - The scope of the role definition.
- description:
- description:
- - The role definition description.
- state:
- description:
- - Assert the state of the role definition.
- - Use C(present) to create or update a role definition; use C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yunge Zhu(@yungezz)
-
-'''
-
-EXAMPLES = '''
- - name: Create a role definition
- azure_rm_roledefinition:
- name: myTestRole
- scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myresourceGroup
- permissions:
- - actions:
- - "Microsoft.Compute/virtualMachines/read"
- data_actions:
- - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write"
- assignable_scopes:
- - "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
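-
-  # Remove the same custom role when it is no longer needed
-  - name: Delete a role definition
-    azure_rm_roledefinition:
-      name: myTestRole
-      scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myresourceGroup
-      state: absent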
-'''
-
-RETURN = '''
-id:
- description:
- - ID of current role definition.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleDefinitions/roleDefinitionId"
-'''
-
-import uuid
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils._text import to_native
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.azure_operation import AzureOperationPoller
- from msrest.polling import LROPoller
- from msrest.serialization import Model
- from azure.mgmt.authorization import AuthorizationManagementClient
- from azure.mgmt.authorization.models import (RoleDefinition, Permission)
-
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-permission_spec = dict(
- actions=dict(
- type='list',
- elements='str'
- ),
- not_actions=dict(
- type='list',
- elements='str'
- ),
- data_actions=dict(
- type='list',
- elements='str'
- ),
- not_data_actions=dict(
- type='list',
- elements='str'
- ),
-)
-
-
-def roledefinition_to_dict(role):
- result = dict(
- id=role.id,
- name=role.name,
- type=role.role_type,
- assignable_scopes=role.assignable_scopes,
- description=role.description,
- role_name=role.role_name
- )
- if role.permissions:
- result['permissions'] = [dict(
- actions=p.actions,
- not_actions=p.not_actions,
- data_actions=p.data_actions,
- not_data_actions=p.not_data_actions
- ) for p in role.permissions]
- return result
-
-
-class Actions:
- NoAction, CreateOrUpdate, Delete = range(3)
-
-
-class AzureRMRoleDefinition(AzureRMModuleBase):
- """Configuration class for an Azure RM Role definition resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- name=dict(
- type='str',
- required=True
- ),
- scope=dict(
- type='str'
- ),
- permissions=dict(
- type='list',
- elements='dict',
- options=permission_spec
- ),
- assignable_scopes=dict(
- type='list',
- elements='str'
- ),
- description=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.name = None
- self.scope = None
- self.permissions = None
- self.description = None
- self.assignable_scopes = None
-
- self.results = dict(
- changed=False,
- id=None,
- )
- self.state = None
- self.to_do = Actions.NoAction
-
- self.role = None
-
- self._client = None
-
- super(AzureRMRoleDefinition, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
-
- old_response = None
- response = None
-
- # get management client
- self._client = self.get_mgmt_svc_client(AuthorizationManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version="2018-01-01-preview")
-
- self.scope = self.build_scope()
-
- # get existing role definition
- old_response = self.get_roledefinition()
-
- if old_response:
- self.results['id'] = old_response['id']
- self.role = old_response
-
- if self.state == 'present':
- # check if the role definition exists
- if not old_response:
- self.log("Role definition doesn't exist in this scope")
-
- self.to_do = Actions.CreateOrUpdate
-
- else:
- # existing role definition, do update
- self.log("Role definition already exists")
- self.log('Result: {0}'.format(old_response))
-
- # compare if role definition changed
- if self.check_update(old_response):
- self.to_do = Actions.CreateOrUpdate
-
- elif self.state == 'absent':
- if old_response:
- self.log("Delete role definition")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_roledefinition(old_response['name'])
-
- self.log('role definition deleted')
-
- else:
- self.log("Role definition {0} does not exist.".format(self.name))
-
- if self.to_do == Actions.CreateOrUpdate:
- self.log('Need to Create/Update role definition')
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- response = self.create_update_roledefinition()
- self.results['id'] = response['id']
-
- return self.results
-
- # build scope
- def build_scope(self):
- subscription_scope = '/subscriptions/' + self.subscription_id
- if self.scope is None:
- return subscription_scope
- return self.scope
-
- # check update
- def check_update(self, old_definition):
- if self.description and self.description != old_definition['description']:
- return True
- if self.permissions:
- if len(self.permissions) != len(old_definition['permissions']):
- return True
- existing_permissions = self.permissions_to_set(old_definition['permissions'])
- new_permissions = self.permissions_to_set(self.permissions)
- if existing_permissions != new_permissions:
- return True
- if self.assignable_scopes and self.assignable_scopes != old_definition['assignable_scopes']:
- return True
- return False
-
- def permissions_to_set(self, permissions):
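- # Normalize each permission dict into sets of native strings and stringify it, so two permission lists can be compared regardless of ordering.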
- new_permissions = [str(dict(
- actions=(set([to_native(a) for a in item.get('actions')]) if item.get('actions') else None),
- not_actions=(set([to_native(a) for a in item.get('not_actions')]) if item.get('not_actions') else None),
- data_actions=(set([to_native(a) for a in item.get('data_actions')]) if item.get('data_actions') else None),
- not_data_actions=(set([to_native(a) for a in item.get('not_data_actions')]) if item.get('not_data_actions') else None),
- )) for item in permissions]
- return set(new_permissions)
-
- def create_update_roledefinition(self):
- '''
- Creates or updates role definition.
-
- :return: deserialized role definition
- '''
- self.log("Creating / Updating role definition {0}".format(self.name))
-
- try:
- permissions = None
- if self.permissions:
- permissions = [AuthorizationManagementClient.models("2018-01-01-preview").Permission(
- actions=p.get('actions', None),
- not_actions=p.get('not_actions', None),
- data_actions=p.get('data_actions', None),
- not_data_actions=p.get('not_data_actions', None)
- ) for p in self.permissions]
- role_definition = AuthorizationManagementClient.models("2018-01-01-preview").RoleDefinition(
- role_name=self.name,
- description=self.description,
- permissions=permissions,
- assignable_scopes=self.assignable_scopes,
- role_type='CustomRole')
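- # When updating, reuse the existing definition's GUID name; otherwise create_or_update is called with a freshly generated GUID.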
- if self.role:
- role_definition.name = self.role['name']
- response = self._client.role_definitions.create_or_update(role_definition_id=self.role['name'] if self.role else str(uuid.uuid4()),
- scope=self.scope,
- role_definition=role_definition)
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create role definition.')
- self.fail("Error creating role definition: {0}".format(str(exc)))
- return roledefinition_to_dict(response)
-
- def delete_roledefinition(self, role_definition_id):
- '''
- Deletes specified role definition.
-
- :return: True
- '''
- self.log("Deleting the role definition {0}".format(self.name))
- scope = self.build_scope()
- try:
- response = self._client.role_definitions.delete(scope=scope,
- role_definition_id=role_definition_id)
- if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
- response = self.get_poller_result(response)
- except CloudError as e:
- self.log('Error attempting to delete the role definition.')
- self.fail("Error deleting the role definition: {0}".format(str(e)))
-
- return True
-
- def get_roledefinition(self):
- '''
- Gets the properties of the specified role definition.
-
- :return: deserialized role definition state dictionary
- '''
- self.log("Checking if the role definition {0} is present".format(self.name))
-
- response = None
-
- try:
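- # Role definitions are matched by display name (role_name) after listing the scope, since the GUID resource name is not known in advance; multiple matches are treated as an error.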
- response = list(self._client.role_definitions.list(scope=self.scope))
-
- if len(response) > 0:
- self.log("Response : {0}".format(response))
- roles = []
- for r in response:
- if r.role_name == self.name:
- roles.append(r)
-
- if len(roles) == 1:
- self.log("role definition : {0} found".format(self.name))
- return roledefinition_to_dict(roles[0])
- if len(roles) > 1:
- self.fail("Found multiple role definitions: {0}".format(roles))
-
- except CloudError as ex:
- self.log("Didn't find role definition {0}".format(self.name))
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMRoleDefinition()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_roledefinition_info.py b/lib/ansible/modules/cloud/azure/azure_rm_roledefinition_info.py
deleted file mode 100644
index c8fe8781f1..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_roledefinition_info.py
+++ /dev/null
@@ -1,310 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yunge Zhu, (@yungezz)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_roledefinition_info
-version_added: "2.9"
-short_description: Get Azure Role Definition facts
-description:
- - Get facts of Azure Role Definition.
-
-options:
- scope:
- description:
- - The scope of role definition.
- required: True
- id:
- description:
- - Role definition id.
- role_name:
- description:
- - Role name.
- type:
- description:
- - Type of role.
- choices:
- - system
- - custom
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yunge Zhu(@yungezz)
-
-'''
-
-EXAMPLES = '''
- - name: List Role Definitions in scope
- azure_rm_roledefinition_info:
- scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup
-
- - name: Get Role Definition by name
- azure_rm_roledefinition_info:
- scope: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup
- role_name: myRoleDefinition
-'''
-
-RETURN = '''
-roledefinitions:
- description:
- - A list of Role Definition facts.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Role Definition ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/providers/Microsoft.Authorization/roleDefinitions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
- role_name:
- description:
- - Role name.
- returned: always
- type: str
- sample: myCustomRoleDefinition
- name:
- description:
- - System assigned role name.
- returned: always
- type: str
- sample: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- assignable_scopes:
- description:
- - List of assignable scopes of this definition.
- returned: always
- type: list
- sample: [ "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup" ]
- permissions:
- description:
- - List of Role Definition permissions.
- returned: always
- contains:
- actions:
- description:
- - List of allowed actions.
- returned: always
- type: list
- sample: [ 'Microsoft.Compute/virtualMachines/read' ]
- not_actions:
- description:
- - List of denied actions.
- returned: always
- type: list
- sample: [ 'Microsoft.Compute/virtualMachines/write' ]
- data_actions:
- description:
- - List of allowed data actions.
- returned: always
- type: list
- sample: [ 'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read' ]
- not_data_actions:
- description:
- - List of denied data actions.
- returned: always
- type: list
- sample: [ 'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write' ]
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils._text import to_native
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.serialization import Model
- from azure.mgmt.authorization import AuthorizationManagementClient
-
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def roledefinition_to_dict(role):
- result = dict(
- id=role.id,
- name=role.name,
- type=role.role_type,
- assignable_scopes=role.assignable_scopes,
- description=role.description,
- role_name=role.role_name
- )
- if role.permissions:
- result['permissions'] = [dict(
- actions=p.actions,
- not_actions=p.not_actions,
- data_actions=p.data_actions,
- not_data_actions=p.not_data_actions
- ) for p in role.permissions]
- return result
-
-
-class AzureRMRoleDefinitionInfo(AzureRMModuleBase):
- def __init__(self):
- self.module_arg_spec = dict(
- scope=dict(
- type='str',
- required=True
- ),
- role_name=dict(type='str'),
- id=dict(type='str'),
- type=dict(
- type='str',
- choices=['custom', 'system'])
- )
-
- self.role_name = None
- self.scope = None
- self.id = None
- self.type = None
-
- self.results = dict(
- changed=False
- )
-
- self._client = None
-
- super(AzureRMRoleDefinitionInfo, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
- is_old_facts = self.module._name == 'azure_rm_roledefinition_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_roledefinition_facts' module has been renamed to 'azure_rm_roledefinition_info'", version='2.13')
-
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
-
- if self.type:
- self.type = self.get_role_type(self.type)
-
- # get management client
- self._client = self.get_mgmt_svc_client(AuthorizationManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version="2018-01-01-preview")
-
- if self.id:
- self.results['roledefinitions'] = self.get_by_id()
- elif self.role_name:
- self.results['roledefinitions'] = self.get_by_role_name()
- else:
- self.results['roledefinitions'] = self.list()
-
- return self.results
-
- def get_role_type(self, role_type):
- if role_type:
- if role_type == 'custom':
- return 'CustomRole'
- else:
- return 'BuiltInRole'
- return role_type
-
- def list(self):
- '''
- List Role Definition in scope.
-
- :return: deserialized Role Definition state dictionary
- '''
- self.log("List Role Definition in scope {0}".format(self.scope))
-
- response = []
-
- try:
- response = list(self._client.role_definitions.list(scope=self.scope))
-
- if len(response) > 0:
- self.log("Response : {0}".format(response))
- roles = []
-
- if self.type:
- roles = [r for r in response if r.role_type == self.type]
- else:
- roles = response
-
- if len(roles) > 0:
- return [roledefinition_to_dict(r) for r in roles]
-
- except CloudError as ex:
- self.log("Didn't find role definition in scope {0}".format(self.scope))
-
- return response
-
- def get_by_id(self):
- '''
- Get Role Definition in scope by id.
-
- :return: deserialized Role Definition state dictionary
- '''
- self.log("Get Role Definition by id {0}".format(self.id))
-
- response = None
-
- try:
- response = self._client.role_definitions.get(scope=self.scope, role_definition_id=self.id)
- if response:
- response = roledefinition_to_dict(response)
- if self.type:
- if response['type'] == self.type:
- return [response]
- else:
- return [response]
-
- except CloudError as ex:
- self.log("Didn't find role definition by id {0}".format(self.id))
-
- return []
-
- def get_by_role_name(self):
- '''
- Get Role Definition in scope by role name.
-
- :return: deserialized role definition state dictionary
- '''
- self.log("Get Role Definition by name {0}".format(self.role_name))
-
- response = []
-
- try:
- response = self.list()
-
- if len(response) > 0:
- roles = []
- for r in response:
- if r['role_name'] == self.role_name:
- roles.append(r)
-
- if len(roles) == 1:
- self.log("Role Definition : {0} found".format(self.role_name))
- return roles
- if len(roles) > 1:
- self.fail("Found multiple Role Definitions with name: {0}".format(self.role_name))
-
- except CloudError as ex:
- self.log("Didn't find Role Definition by name {0}".format(self.role_name))
-
- return []
-
-
-def main():
- """Main execution"""
- AzureRMRoleDefinitionInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_route.py b/lib/ansible/modules/cloud/azure/azure_rm_route.py
deleted file mode 100644
index dd2d0e8324..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_route.py
+++ /dev/null
@@ -1,220 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_route
-version_added: "2.7"
-short_description: Manage Azure route resource
-description:
- - Create, update or delete a route.
-options:
- resource_group:
- description:
- - Name of resource group.
- required: true
- name:
- description:
- - Name of the route.
- required: true
- state:
- description:
- - Assert the state of the route. Use C(present) to create or update and C(absent) to delete.
- default: present
- choices:
- - absent
- - present
- address_prefix:
- description:
- - The destination CIDR to which the route applies.
- next_hop_type:
- description:
- - The type of Azure hop the packet should be sent to.
- choices:
- - virtual_network_gateway
- - vnet_local
- - internet
- - virtual_appliance
- - none
- default: 'none'
- next_hop_ip_address:
- description:
- - The IP address packets should be forwarded to.
- - Next hop values are only allowed in routes where the next hop type is VirtualAppliance.
- route_table_name:
- description:
- - The name of the route table.
- required: true
-
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
- - name: Create a route
- azure_rm_route:
- resource_group: myResourceGroup
- name: myRoute
- address_prefix: 10.1.0.0/16
- next_hop_type: virtual_network_gateway
- route_table_name: table
-
- - name: Delete a route
- azure_rm_route:
- resource_group: myResourceGroup
- name: myRoute
- route_table_name: table
- state: absent
-'''
-RETURN = '''
-id:
- description:
- - Current state of the route.
- returned: success
- type: str
- sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/routeTables/tableb57/routes/routeb57"
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _snake_to_camel
-
-
-class AzureRMRoute(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- address_prefix=dict(type='str'),
- next_hop_type=dict(type='str',
- choices=['virtual_network_gateway',
- 'vnet_local',
- 'internet',
- 'virtual_appliance',
- 'none'],
- default='none'),
- next_hop_ip_address=dict(type='str'),
- route_table_name=dict(type='str', required=True)
- )
-
- required_if = [
- ('state', 'present', ['next_hop_type'])
- ]
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.address_prefix = None
- self.next_hop_type = None
- self.next_hop_ip_address = None
- self.route_table_name = None
-
- self.results = dict(
- changed=False,
- id=None
- )
-
- super(AzureRMRoute, self).__init__(self.module_arg_spec,
- required_if=required_if,
- supports_check_mode=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()):
- setattr(self, key, kwargs[key])
-
- result = dict()
- changed = False
-
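- # The module accepts snake_case choices (for example virtual_network_gateway); convert to the CamelCase form the Azure API expects (VirtualNetworkGateway).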
- self.next_hop_type = _snake_to_camel(self.next_hop_type, capitalize_first=True)
-
- result = self.get_route()
- if self.state == 'absent' and result:
- changed = True
- if not self.check_mode:
- self.delete_route()
- elif self.state == 'present':
- if not result:
- changed = True # create new route
- else: # check update
- if result.next_hop_type != self.next_hop_type:
- self.log('Update: {0} next_hop_type from {1} to {2}'.format(self.name, result.next_hop_type, self.next_hop_type))
- changed = True
- if result.next_hop_ip_address != self.next_hop_ip_address:
- self.log('Update: {0} next_hop_ip_address from {1} to {2}'.format(self.name, result.next_hop_ip_address, self.next_hop_ip_address))
- changed = True
- if result.address_prefix != self.address_prefix:
- self.log('Update: {0} address_prefix from {1} to {2}'.format(self.name, result.address_prefix, self.address_prefix))
- changed = True
- if changed:
- result = self.network_models.Route(name=self.name,
- address_prefix=self.address_prefix,
- next_hop_type=self.next_hop_type,
- next_hop_ip_address=self.next_hop_ip_address)
- if not self.check_mode:
- result = self.create_or_update_route(result)
-
- self.results['id'] = result.id if result else None
- self.results['changed'] = changed
- return self.results
-
- def create_or_update_route(self, param):
- try:
- poller = self.network_client.routes.create_or_update(self.resource_group, self.route_table_name, self.name, param)
- return self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error creating or updating route {0} - {1}".format(self.name, str(exc)))
-
- def delete_route(self):
- try:
- poller = self.network_client.routes.delete(self.resource_group, self.route_table_name, self.name)
- result = self.get_poller_result(poller)
- return result
- except Exception as exc:
- self.fail("Error deleting route {0} - {1}".format(self.name, str(exc)))
-
- def get_route(self):
- try:
- return self.network_client.routes.get(self.resource_group, self.route_table_name, self.name)
- except CloudError as cloud_err:
- # Return None iff the resource is not found
- if cloud_err.status_code == 404:
- self.log('{0}'.format(str(cloud_err)))
- return None
- self.fail('Error: failed to get resource {0} - {1}'.format(self.name, str(cloud_err)))
- except Exception as exc:
- self.fail('Error: failed to get resource {0} - {1}'.format(self.name, str(exc)))
-
-
-def main():
- AzureRMRoute()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_routetable.py b/lib/ansible/modules/cloud/azure/azure_rm_routetable.py
deleted file mode 100644
index 8eb203b93d..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_routetable.py
+++ /dev/null
@@ -1,195 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_routetable
-version_added: "2.7"
-short_description: Manage Azure route table resource
-description:
- - Create, update or delete a route table.
-options:
- resource_group:
- description:
- - Name of resource group.
- required: true
- name:
- description:
- - Name of the route table.
- required: true
- state:
- description:
- - Assert the state of the route table. Use C(present) to create or update and C(absent) to delete.
- default: present
- choices:
- - absent
- - present
- disable_bgp_route_propagation:
- description:
- - Specifies whether to disable the routes learned by BGP on that route table.
- type: bool
- default: False
- location:
- description:
- - Region of the resource.
- - Derived from I(resource_group) if not specified.
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
- - name: Create a route table
- azure_rm_routetable:
- resource_group: myResourceGroup
- name: myRouteTable
- disable_bgp_route_propagation: False
- tags:
- purpose: testing
-
- - name: Delete a route table
- azure_rm_routetable:
- resource_group: myResourceGroup
- name: myRouteTable
- state: absent
-'''
-RETURN = '''
-changed:
- description:
- - Whether the resource is changed.
- returned: always
- type: bool
- sample: true
-id:
- description:
- - Resource ID.
- returned: success
- type: str
- sample: "/subscriptions/xxx...xxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/routeTables/tableb57dc95642/routes/routeb57dc95986"
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, normalize_location_name
-
-
-class AzureRMRouteTable(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- location=dict(type='str'),
- disable_bgp_route_propagation=dict(type='bool', default=False)
- )
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.location = None
- self.tags = None
- self.disable_bgp_route_propagation = None
-
- self.results = dict(
- changed=False
- )
-
- super(AzureRMRouteTable, self).__init__(self.module_arg_spec,
- supports_check_mode=True)
-
- def exec_module(self, **kwargs):
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- # Set default location
- self.location = resource_group.location
- self.location = normalize_location_name(self.location)
-
- result = dict()
- changed = False
-
- result = self.get_table()
- if self.state == 'absent' and result:
- changed = True
- if not self.check_mode:
- self.delete_table()
- elif self.state == 'present':
- if not result:
- changed = True # create new route table
- else: # check update
- update_tags, self.tags = self.update_tags(result.tags)
- if update_tags:
- changed = True
- if self.disable_bgp_route_propagation != result.disable_bgp_route_propagation:
- changed = True
-
- if changed:
- result = self.network_models.RouteTable(location=self.location,
- tags=self.tags,
- disable_bgp_route_propagation=self.disable_bgp_route_propagation)
- if not self.check_mode:
- result = self.create_or_update_table(result)
-
- self.results['id'] = result.id if result else None
- self.results['changed'] = changed
- return self.results
-
- def create_or_update_table(self, param):
- try:
- poller = self.network_client.route_tables.create_or_update(self.resource_group, self.name, param)
- return self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error creating or updating route table {0} - {1}".format(self.name, str(exc)))
-
- def delete_table(self):
- try:
- poller = self.network_client.route_tables.delete(self.resource_group, self.name)
- result = self.get_poller_result(poller)
- return result
- except Exception as exc:
- self.fail("Error deleting route table {0} - {1}".format(self.name, str(exc)))
-
- def get_table(self):
- try:
- return self.network_client.route_tables.get(self.resource_group, self.name)
- except CloudError as cloud_err:
- # Return None iff the resource is not found
- if cloud_err.status_code == 404:
- self.log('{0}'.format(str(cloud_err)))
- return None
- self.fail('Error: failed to get resource {0} - {1}'.format(self.name, str(cloud_err)))
- except Exception as exc:
- self.fail('Error: failed to get resource {0} - {1}'.format(self.name, str(exc)))
-
-
-def main():
- AzureRMRouteTable()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_routetable_info.py b/lib/ansible/modules/cloud/azure/azure_rm_routetable_info.py
deleted file mode 100644
index 4ef342a11d..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_routetable_info.py
+++ /dev/null
@@ -1,219 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_routetable_info
-
-version_added: "2.9"
-
-short_description: Get route table facts
-
-description:
- - Get facts for a specific route table or all route tables in a resource group or subscription.
-
-options:
- name:
- description:
- - Limit results to a specific route table.
- resource_group:
- description:
- - Limit results to a specific resource group.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
- - name: Get facts for one route table
- azure_rm_routetable_info:
- name: Testing
- resource_group: myResourceGroup
-
- - name: Get facts for all route tables
- azure_rm_routetable_info:
- resource_group: myResourceGroup
-
- - name: Get facts by tags
- azure_rm_routetable_info:
- tags:
- - testing
- - foo:bar
-'''
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: success
- type: str
- sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/routeTables/tableb57dc95236"
-name:
- description:
- - Name of the resource.
- returned: success
- type: str
- sample: tableb57dc95236
-resource_group:
- description:
- - Resource group of the route table.
- returned: success
- type: str
- sample: v-xisuRG
-disable_bgp_route_propagation:
- description:
- - Whether the routes learned by BGP on that route table are disabled.
- returned: success
- type: bool
- sample: false
-tags:
- description:
- - Tags of the route table.
- returned: success
- type: dict
- sample: { 'key1':'value1', 'key2':'value2'}
-routes:
- description:
- - Current routes of the route table.
- returned: success
- type: list
- sample: [
- {
- "id": "/subscriptions/xxx...xxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/routeTables/tableb57dc95236/routes/routeb57dc95540",
- "name": "routeb57dc95540",
- "resource_group": "v-xisuRG",
- "route_table_name": "tableb57dc95236",
- "address_prefix": "10.1.0.0/24",
- "next_hop_type": "virtual_network_gateway",
- "next_hop_ip_address": null
- }
- ]
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict
-from ansible.module_utils.common.dict_transformations import _camel_to_snake
-
-
-def route_to_dict(route):
- id_dict = azure_id_to_dict(route.id)
- return dict(
- id=route.id,
- name=route.name,
- resource_group=id_dict.get('resourceGroups'),
- route_table_name=id_dict.get('routeTables'),
- address_prefix=route.address_prefix,
- next_hop_type=_camel_to_snake(route.next_hop_type),
- next_hop_ip_address=route.next_hop_ip_address
- )
-
-
-def instance_to_dict(table):
- return dict(
- id=table.id,
- name=table.name,
- resource_group=azure_id_to_dict(table.id).get('resourceGroups'),
- location=table.location,
- routes=[route_to_dict(i) for i in table.routes] if table.routes else [],
- disable_bgp_route_propagation=table.disable_bgp_route_propagation,
- tags=table.tags
- )
-
-
-class AzureRMRouteTableInfo(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str'),
- resource_group=dict(type='str'),
- tags=dict(type='list')
- )
-
- self.results = dict(
- changed=False,
- route_tables=[]
- )
-
- self.name = None
- self.resource_group = None
- self.tags = None
-
- super(AzureRMRouteTableInfo, self).__init__(self.module_arg_spec,
- supports_tags=False,
- facts_module=True)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_routetable_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_routetable_facts' module has been renamed to 'azure_rm_routetable_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- response = []
- if self.name:
- response = self.get_item()
- elif self.resource_group:
- response = self.list_items()
- else:
- response = self.list_all_items()
-
- self.results['route_tables'] = [instance_to_dict(x) for x in response if self.has_tags(x.tags, self.tags)]
- return self.results
-
- def get_item(self):
- self.log('Get route table for {0}-{1}'.format(self.resource_group, self.name))
- try:
- item = self.network_client.route_tables.get(self.resource_group, self.name)
- return [item]
- except CloudError:
- pass
- return []
-
- def list_items(self):
- self.log('List all items in resource group')
- try:
- return self.network_client.route_tables.list(self.resource_group)
- except CloudError as exc:
- self.fail("Failed to list items - {0}".format(str(exc)))
- return []
-
- def list_all_items(self):
- self.log("List all items in subscription")
- try:
- return self.network_client.route_tables.list_all()
- except CloudError as exc:
- self.fail("Failed to list all items - {0}".format(str(exc)))
- return []
-
-
-def main():
- AzureRMRouteTableInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py b/lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py
deleted file mode 100644
index a399d6a294..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py
+++ /dev/null
@@ -1,817 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_securitygroup
-version_added: "2.1"
-short_description: Manage Azure network security groups
-description:
- - Create, update or delete a network security group.
- - A security group contains Access Control List (ACL) rules that allow or deny network traffic to subnets or individual network interfaces.
- - A security group is created with a set of default security rules and an empty set of security rules.
- - Shape traffic flow by adding rules to the empty set of security rules.
-
-options:
- default_rules:
- description:
- - The set of default rules automatically added to a security group at creation.
- - In general default rules will not be modified. Modify rules to shape the flow of traffic to or from a subnet or NIC.
- - See rules below for the makeup of a rule dict.
- location:
- description:
- - Valid azure location. Defaults to location of the resource group.
- name:
- description:
- - Name of the security group to operate on.
- purge_default_rules:
- description:
- - Remove any existing rules not matching those defined in the default_rules parameter.
- type: bool
- default: 'no'
- purge_rules:
- description:
- - Remove any existing rules not matching those defined in the rules parameter.
- type: bool
- default: 'no'
- resource_group:
- description:
- - Name of the resource group the security group belongs to.
- required: true
- rules:
- description:
- - Set of rules shaping traffic flow to or from a subnet or NIC. Each rule is a dictionary.
- suboptions:
- name:
- description:
- - Unique name for the rule.
- required: true
- description:
- description:
- - Short description of the rule's purpose.
- protocol:
- description:
- - Accepted traffic protocol.
- choices:
- - Udp
- - Tcp
- - "*"
- default: "*"
- source_port_range:
- description:
- - Port or range of ports from which traffic originates.
- - It can accept string type or a list of string type.
- default: "*"
- destination_port_range:
- description:
- - Port or range of ports to which traffic is headed.
- - It can accept string type or a list of string type.
- default: "*"
- source_address_prefix:
- description:
- - The CIDR or source IP range.
- - Asterisk C(*) can also be used to match all source IPs.
- - Default tags such as C(VirtualNetwork), C(AzureLoadBalancer) and C(Internet) can also be used.
- - If this is an ingress rule, specifies where network traffic originates from.
- - It can accept string type or a list of string type.
- default: "*"
- destination_address_prefix:
- description:
- - The destination address prefix.
- - CIDR or destination IP range.
- - Asterisk C(*) can also be used to match all source IPs.
- - Default tags such as C(VirtualNetwork), C(AzureLoadBalancer) and C(Internet) can also be used.
- - It can accept string type or a list of string type.
- default: "*"
- source_application_security_groups:
- description:
- - List of the source application security groups.
- - It could be list of resource id.
- - It could be list of names in same resource group.
- - It could be list of dict containing resource_group and name.
- - It is mutually exclusive with C(source_address_prefix) and C(source_address_prefixes).
- type: list
- destination_application_security_groups:
- description:
- - List of the destination application security groups.
- - It could be list of resource id.
- - It could be list of names in same resource group.
- - It could be list of dict containing I(resource_group) and I(name).
- - It is mutually exclusive with C(destination_address_prefix) and C(destination_address_prefixes).
- type: list
- access:
- description:
- - Whether or not to allow the traffic flow.
- choices:
- - Allow
- - Deny
- default: Allow
- priority:
- description:
- - Order in which to apply the rule. Must be a unique integer between 100 and 4096 inclusive.
- required: true
- direction:
- description:
- - Indicates the direction of the traffic flow.
- choices:
- - Inbound
- - Outbound
- default: Inbound
- state:
- description:
- - Assert the state of the security group. Set to C(present) to create or update a security group. Set to C(absent) to remove a security group.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Chris Houseknecht (@chouseknecht)
- - Matt Davis (@nitzmahone)
-
-'''
-
-EXAMPLES = '''
-
-# Create a security group
-- azure_rm_securitygroup:
- resource_group: myResourceGroup
- name: mysecgroup
- purge_rules: yes
- rules:
- - name: DenySSH
- protocol: Tcp
- destination_port_range: 22
- access: Deny
- priority: 100
- direction: Inbound
- - name: 'AllowSSH'
- protocol: Tcp
- source_address_prefix:
- - '174.109.158.0/24'
- - '174.109.159.0/24'
- destination_port_range: 22
- access: Allow
- priority: 101
- direction: Inbound
- - name: 'AllowMultiplePorts'
- protocol: Tcp
- source_address_prefix:
- - '174.109.158.0/24'
- - '174.109.159.0/24'
- destination_port_range:
- - 80
- - 443
- access: Allow
- priority: 102
-
-# Update rules on existing security group
-- azure_rm_securitygroup:
- resource_group: myResourceGroup
- name: mysecgroup
- rules:
- - name: DenySSH
- protocol: Tcp
- destination_port_range: 22-23
- access: Deny
- priority: 100
- direction: Inbound
- - name: AllowSSHFromHome
- protocol: Tcp
- source_address_prefix: '174.109.158.0/24'
- destination_port_range: 22-23
- access: Allow
- priority: 102
- direction: Inbound
- tags:
- testing: testing
- delete: on-exit
-
-# Delete security group
-- azure_rm_securitygroup:
- resource_group: myResourceGroup
- name: mysecgroup
- state: absent
-'''
-
-RETURN = '''
-state:
- description:
- - Current state of the security group.
- returned: always
- type: complex
- contains:
- default_rules:
- description:
- - The default security rules of network security group.
- returned: always
- type: list
- sample: [
- {
- "access": "Allow",
- "description": "Allow inbound traffic from all VMs in VNET",
- "destination_address_prefix": "VirtualNetwork",
- "destination_port_range": "*",
- "direction": "Inbound",
- "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetInBound",
- "name": "AllowVnetInBound",
- "priority": 65000,
- "protocol": "*",
- "provisioning_state": "Succeeded",
- "source_address_prefix": "VirtualNetwork",
- "source_port_range": "*"
- },
- {
- "access": "Allow",
- "description": "Allow inbound traffic from azure load balancer",
- "destination_address_prefix": "*",
- "destination_port_range": "*",
- "direction": "Inbound",
- "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowAzureLoadBalancerInBound",
- "name": "AllowAzureLoadBalancerInBound",
- "priority": 65001,
- "protocol": "*",
- "provisioning_state": "Succeeded",
- "source_address_prefix": "AzureLoadBalancer",
- "source_port_range": "*"
- },
- {
- "access": "Deny",
- "description": "Deny all inbound traffic",
- "destination_address_prefix": "*",
- "destination_port_range": "*",
- "direction": "Inbound",
- "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllInBound",
- "name": "DenyAllInBound",
- "priority": 65500,
- "protocol": "*",
- "provisioning_state": "Succeeded",
- "source_address_prefix": "*",
- "source_port_range": "*"
- },
- {
- "access": "Allow",
- "description": "Allow outbound traffic from all VMs to all VMs in VNET",
- "destination_address_prefix": "VirtualNetwork",
- "destination_port_range": "*",
- "direction": "Outbound",
- "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetOutBound",
- "name": "AllowVnetOutBound",
- "priority": 65000,
- "protocol": "*",
- "provisioning_state": "Succeeded",
- "source_address_prefix": "VirtualNetwork",
- "source_port_range": "*"
- },
- {
- "access": "Allow",
- "description": "Allow outbound traffic from all VMs to Internet",
- "destination_address_prefix": "Internet",
- "destination_port_range": "*",
- "direction": "Outbound",
- "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowInternetOutBound",
- "name": "AllowInternetOutBound",
- "priority": 65001,
- "protocol": "*",
- "provisioning_state": "Succeeded",
- "source_address_prefix": "*",
- "source_port_range": "*"
- },
- {
- "access": "Deny",
- "description": "Deny all outbound traffic",
- "destination_address_prefix": "*",
- "destination_port_range": "*",
- "direction": "Outbound",
- "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllOutBound",
- "name": "DenyAllOutBound",
- "priority": 65500,
- "protocol": "*",
- "provisioning_state": "Succeeded",
- "source_address_prefix": "*",
- "source_port_range": "*"
- }
- ]
- id:
- description:
- - The resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup"
- location:
- description:
- - The resource location.
- returned: always
- type: str
- sample: "westus"
- name:
- description:
- - Name of the security group.
- returned: always
- type: str
- sample: "mysecgroup"
- network_interfaces:
- description:
- - A collection of references to network interfaces.
- returned: always
- type: list
- sample: []
- rules:
- description:
- - A collection of security rules of the network security group.
- returned: always
- type: list
- sample: [
- {
- "access": "Deny",
- "description": null,
- "destination_address_prefix": "*",
- "destination_port_range": "22",
- "direction": "Inbound",
- "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/DenySSH",
- "name": "DenySSH",
- "priority": 100,
- "protocol": "Tcp",
- "provisioning_state": "Succeeded",
- "source_address_prefix": "*",
- "source_port_range": "*"
- },
- {
- "access": "Allow",
- "description": null,
- "destination_address_prefix": "*",
- "destination_port_range": "22",
- "direction": "Inbound",
- "etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/AllowSSH",
- "name": "AllowSSH",
- "priority": 101,
- "protocol": "Tcp",
- "provisioning_state": "Succeeded",
- "source_address_prefix": "174.109.158.0/24",
- "source_port_range": "*"
- }
- ]
- subnets:
- description:
- - A collection of references to subnets.
- returned: always
- type: list
- sample: []
- tags:
- description:
- - Tags to assign to the security group.
- returned: always
- type: dict
- sample: {
- "delete": "on-exit",
- "foo": "bar",
- "testing": "testing"
- }
- type:
- description:
- - The resource type.
- returned: always
- type: str
- sample: "Microsoft.Network/networkSecurityGroups"
-''' # NOQA
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.tools import is_valid_resource_id
- from azure.mgmt.network import NetworkManagementClient
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.six import integer_types
-from ansible.module_utils._text import to_native
-
-
-def validate_rule(self, rule, rule_type=None):
- '''
- Apply defaults to a rule dictionary and check that all values are valid.
-
- :param rule: rule dict
- :param rule_type: Set to 'default' if the rule is part of the default set of rules.
- :return: None
- '''
- priority = rule.get('priority', 0)
- if rule_type != 'default' and (priority < 100 or priority > 4096):
- raise Exception("Rule priority must be between 100 and 4096")
-
- def check_plural(src, dest):
- if isinstance(rule.get(src), list):
- rule[dest] = rule[src]
- rule[src] = None
-
- check_plural('destination_address_prefix', 'destination_address_prefixes')
- check_plural('source_address_prefix', 'source_address_prefixes')
- check_plural('source_port_range', 'source_port_ranges')
- check_plural('destination_port_range', 'destination_port_ranges')
-
-    # when source/destination_application_security_groups is set, drop the default '*' value of the matching address prefix
- if rule.get('source_application_security_groups') and rule.get('source_address_prefix') == '*':
- rule['source_address_prefix'] = None
- if rule.get('destination_application_security_groups') and rule.get('destination_address_prefix') == '*':
- rule['destination_address_prefix'] = None
-
-
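As a minimal standalone sketch of what validate_rule does to each rule dict (hypothetical helper name, plain dicts only): list values are shifted from the singular keys to their plural counterparts, and the default '*' address prefix is dropped once application security groups are supplied.

```python
# Standalone sketch of the normalization above; names and data are illustrative only.
def normalize_rule(rule):
    for src, dest in [('destination_address_prefix', 'destination_address_prefixes'),
                      ('source_address_prefix', 'source_address_prefixes'),
                      ('source_port_range', 'source_port_ranges'),
                      ('destination_port_range', 'destination_port_ranges')]:
        if isinstance(rule.get(src), list):      # a list means the plural form was intended
            rule[dest] = rule[src]
            rule[src] = None
    if rule.get('source_application_security_groups') and rule.get('source_address_prefix') == '*':
        rule['source_address_prefix'] = None     # ASG references replace the wildcard prefix
    return rule


print(normalize_rule({'name': 'AllowWeb', 'destination_port_range': [80, 443],
                      'source_address_prefix': '*', 'priority': 110}))
# -> destination_port_range becomes None and destination_port_ranges becomes [80, 443]
```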
-def compare_rules_change(old_list, new_list, purge_list):
- old_list = old_list or []
- new_list = new_list or []
- changed = False
-
- for old_rule in old_list:
- matched = next((x for x in new_list if x['name'] == old_rule['name']), [])
- if matched: # if the new one is in the old list, check whether it is updated
- changed = changed or compare_rules(old_rule, matched)
- elif not purge_list: # keep this rule
- new_list.append(old_rule)
- else: # one rule is removed
- changed = True
-    # Check whether the new and old lists contain the same rules; only the names are compared here
- if not changed:
- new_names = [to_native(x['name']) for x in new_list]
- old_names = [to_native(x['name']) for x in old_list]
- changed = (set(new_names) != set(old_names))
- return changed, new_list
-
-
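The merge-and-purge behaviour of compare_rules_change can be illustrated with a small self-contained sketch; the comparator is pluggable here instead of calling compare_rules, and the data is made up.

```python
# Standalone sketch of the merge/purge logic; 'differs' stands in for compare_rules.
def merge_rules(old_list, new_list, purge, differs):
    old_list = old_list or []
    new_list = list(new_list or [])
    changed = False
    for old_rule in old_list:
        matched = next((r for r in new_list if r['name'] == old_rule['name']), None)
        if matched:                  # same name in both lists: did its fields change?
            changed = changed or differs(old_rule, matched)
        elif not purge:              # keep rules missing from the new list
            new_list.append(old_rule)
        else:                        # purge=True drops them, which counts as a change
            changed = True
    if not changed:                  # a newly added name is also a change
        changed = {r['name'] for r in new_list} != {r['name'] for r in old_list}
    return changed, new_list


old = [{'name': 'AllowSSH', 'priority': 101}]
new = [{'name': 'AllowHTTP', 'priority': 110}]
# With purge=False the existing AllowSSH rule is kept next to AllowHTTP,
# and 'changed' is True because a new rule name appeared.
print(merge_rules(old, new, purge=False, differs=lambda a, b: a != b))
```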
-def compare_rules(old_rule, rule):
- changed = False
- if old_rule['name'] != rule['name']:
- changed = True
- if rule.get('description', None) != old_rule['description']:
- changed = True
- if rule['protocol'] != old_rule['protocol']:
- changed = True
- if str(rule['source_port_range']) != str(old_rule['source_port_range']):
- changed = True
- if str(rule['destination_port_range']) != str(old_rule['destination_port_range']):
- changed = True
- if rule['access'] != old_rule['access']:
- changed = True
- if rule['priority'] != old_rule['priority']:
- changed = True
- if rule['direction'] != old_rule['direction']:
- changed = True
- if str(rule['source_address_prefix']) != str(old_rule['source_address_prefix']):
- changed = True
- if str(rule['destination_address_prefix']) != str(old_rule['destination_address_prefix']):
- changed = True
- if set(rule.get('source_address_prefixes') or []) != set(old_rule.get('source_address_prefixes') or []):
- changed = True
- if set(rule.get('destination_address_prefixes') or []) != set(old_rule.get('destination_address_prefixes') or []):
- changed = True
- if set(rule.get('source_port_ranges') or []) != set(old_rule.get('source_port_ranges') or []):
- changed = True
- if set(rule.get('destination_port_ranges') or []) != set(old_rule.get('destination_port_ranges') or []):
- changed = True
- if set(rule.get('source_application_security_groups') or []) != set(old_rule.get('source_application_security_groups') or []):
- changed = True
- if set(rule.get('destination_application_security_groups') or []) != set(old_rule.get('destination_application_security_groups') or []):
- changed = True
- return changed
-
-
-def create_rule_instance(self, rule):
- '''
- Create an instance of SecurityRule from a dict.
-
- :param rule: dict
- :return: SecurityRule
- '''
- return self.nsg_models.SecurityRule(
- description=rule.get('description', None),
- protocol=rule.get('protocol', None),
- source_port_range=rule.get('source_port_range', None),
- destination_port_range=rule.get('destination_port_range', None),
- source_address_prefix=rule.get('source_address_prefix', None),
- source_address_prefixes=rule.get('source_address_prefixes', None),
- destination_address_prefix=rule.get('destination_address_prefix', None),
- destination_address_prefixes=rule.get('destination_address_prefixes', None),
- source_port_ranges=rule.get('source_port_ranges', None),
- destination_port_ranges=rule.get('destination_port_ranges', None),
- source_application_security_groups=[
- self.nsg_models.ApplicationSecurityGroup(id=p)
- for p in rule.get('source_application_security_groups')] if rule.get('source_application_security_groups') else None,
- destination_application_security_groups=[
- self.nsg_models.ApplicationSecurityGroup(id=p)
- for p in rule.get('destination_application_security_groups')] if rule.get('destination_application_security_groups') else None,
- access=rule.get('access', None),
- priority=rule.get('priority', None),
- direction=rule.get('direction', None),
- provisioning_state=rule.get('provisioning_state', None),
- name=rule.get('name', None),
- etag=rule.get('etag', None)
- )
-
-
-def create_rule_dict_from_obj(rule):
- '''
- Create a dict from an instance of a SecurityRule.
-
- :param rule: SecurityRule
- :return: dict
- '''
- return dict(
- id=rule.id,
- name=rule.name,
- description=rule.description,
- protocol=rule.protocol,
- source_port_range=rule.source_port_range,
- destination_port_range=rule.destination_port_range,
- source_address_prefix=rule.source_address_prefix,
- destination_address_prefix=rule.destination_address_prefix,
- source_port_ranges=rule.source_port_ranges,
- destination_port_ranges=rule.destination_port_ranges,
- source_address_prefixes=rule.source_address_prefixes,
- destination_address_prefixes=rule.destination_address_prefixes,
- source_application_security_groups=[p.id for p in rule.source_application_security_groups] if rule.source_application_security_groups else None,
- destination_application_security_groups=[
- p.id for p in rule.destination_application_security_groups] if rule.destination_application_security_groups else None,
- access=rule.access,
- priority=rule.priority,
- direction=rule.direction,
- provisioning_state=rule.provisioning_state,
- etag=rule.etag
- )
-
-
-def create_network_security_group_dict(nsg):
- results = dict(
- id=nsg.id,
- name=nsg.name,
- type=nsg.type,
- location=nsg.location,
- tags=nsg.tags,
- )
- results['rules'] = []
- if nsg.security_rules:
- for rule in nsg.security_rules:
- results['rules'].append(create_rule_dict_from_obj(rule))
-
- results['default_rules'] = []
- if nsg.default_security_rules:
- for rule in nsg.default_security_rules:
- results['default_rules'].append(create_rule_dict_from_obj(rule))
-
- results['network_interfaces'] = []
- if nsg.network_interfaces:
- for interface in nsg.network_interfaces:
- results['network_interfaces'].append(interface.id)
-
- results['subnets'] = []
- if nsg.subnets:
- for subnet in nsg.subnets:
- results['subnets'].append(subnet.id)
-
- return results
-
-
-rule_spec = dict(
- name=dict(type='str', required=True),
- description=dict(type='str'),
- protocol=dict(type='str', choices=['Udp', 'Tcp', '*'], default='*'),
- source_port_range=dict(type='raw', default='*'),
- destination_port_range=dict(type='raw', default='*'),
- source_address_prefix=dict(type='raw', default='*'),
- destination_address_prefix=dict(type='raw', default='*'),
- source_application_security_groups=dict(type='list', elements='raw'),
- destination_application_security_groups=dict(type='list', elements='raw'),
- access=dict(type='str', choices=['Allow', 'Deny'], default='Allow'),
- priority=dict(type='int', required=True),
- direction=dict(type='str', choices=['Inbound', 'Outbound'], default='Inbound')
-)
-
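For orientation, a single entry that satisfies rule_spec above might look like the following (values are illustrative; unspecified keys take the declared defaults, and priority must stay within the 100-4096 window enforced by validate_rule).

```python
# Illustrative entry for the 'rules' option; unspecified keys fall back to the
# defaults declared in rule_spec above.
sample_rule = dict(
    name='AllowHTTPS',
    protocol='Tcp',
    destination_port_range=443,
    source_address_prefix='Internet',
    access='Allow',
    priority=120,        # non-default rules must use 100-4096
    direction='Inbound',
)
```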
-
-class AzureRMSecurityGroup(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- default_rules=dict(type='list', elements='dict', options=rule_spec),
- location=dict(type='str'),
- name=dict(type='str', required=True),
- purge_default_rules=dict(type='bool', default=False),
- purge_rules=dict(type='bool', default=False),
- resource_group=dict(required=True, type='str'),
- rules=dict(type='list', elements='dict', options=rule_spec),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- )
-
- self.default_rules = None
- self.location = None
- self.name = None
- self.purge_default_rules = None
- self.purge_rules = None
- self.resource_group = None
- self.rules = None
- self.state = None
- self.tags = None
- self.nsg_models = None # type: azure.mgmt.network.models
-
- self.results = dict(
- changed=False,
- state=dict()
- )
-
- mutually_exclusive = [["source_application_security_group", "source_address_prefix"],
- ["source_application_security_group", "source_address_prefixes"],
- ["destination_application_security_group", "destination_address_prefix"],
- ["destination_application_security_group", "destination_address_prefixes"]]
-
- super(AzureRMSecurityGroup, self).__init__(self.module_arg_spec,
- supports_check_mode=True,
- mutually_exclusive=mutually_exclusive)
-
- def exec_module(self, **kwargs):
- # tighten up poll interval for security groups; default 30s is an eternity
- # this value is still overridden by the response Retry-After header (which is set on the initial operation response to 10s)
- self.network_client.config.long_running_operation_timeout = 3
- self.nsg_models = self.network_client.network_security_groups.models
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- changed = False
- results = dict()
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- # Set default location
- self.location = resource_group.location
-
- if self.rules:
- for rule in self.rules:
- try:
- validate_rule(self, rule)
- except Exception as exc:
- self.fail("Error validating rule {0} - {1}".format(rule, str(exc)))
- self.convert_asg_to_id(rule)
-
- if self.default_rules:
- for rule in self.default_rules:
- try:
- validate_rule(self, rule, 'default')
- except Exception as exc:
- self.fail("Error validating default rule {0} - {1}".format(rule, str(exc)))
- self.convert_asg_to_id(rule)
-
- try:
- nsg = self.network_client.network_security_groups.get(self.resource_group, self.name)
- results = create_network_security_group_dict(nsg)
- self.log("Found security group:")
- self.log(results, pretty_print=True)
- self.check_provisioning_state(nsg, self.state)
- if self.state == 'present':
- pass
- elif self.state == 'absent':
- self.log("CHANGED: security group found but state is 'absent'")
- changed = True
- except CloudError: # TODO: actually check for ResourceMissingError
- if self.state == 'present':
- self.log("CHANGED: security group not found and state is 'present'")
- changed = True
-
- if self.state == 'present' and not changed:
- # update the security group
- self.log("Update security group {0}".format(self.name))
-
- update_tags, results['tags'] = self.update_tags(results['tags'])
- if update_tags:
- changed = True
-
- rule_changed, new_rule = compare_rules_change(results['rules'], self.rules, self.purge_rules)
- if rule_changed:
- changed = True
- results['rules'] = new_rule
- rule_changed, new_rule = compare_rules_change(results['default_rules'], self.default_rules, self.purge_default_rules)
- if rule_changed:
- changed = True
- results['default_rules'] = new_rule
-
- self.results['changed'] = changed
- self.results['state'] = results
- if not self.check_mode and changed:
- self.results['state'] = self.create_or_update(results)
-
- elif self.state == 'present' and changed:
- # create the security group
- self.log("Create security group {0}".format(self.name))
-
- if not self.location:
- self.fail("Parameter error: location required when creating a security group.")
-
- results['name'] = self.name
- results['location'] = self.location
- results['rules'] = []
- results['default_rules'] = []
- results['tags'] = {}
-
- if self.rules:
- results['rules'] = self.rules
- if self.default_rules:
- results['default_rules'] = self.default_rules
- if self.tags:
- results['tags'] = self.tags
-
- self.results['changed'] = changed
- self.results['state'] = results
- if not self.check_mode:
- self.results['state'] = self.create_or_update(results)
-
- elif self.state == 'absent' and changed:
- self.log("Delete security group {0}".format(self.name))
- self.results['changed'] = changed
- self.results['state'] = dict()
- if not self.check_mode:
- self.delete()
- # the delete does not actually return anything. if no exception, then we'll assume
- # it worked.
- self.results['state']['status'] = 'Deleted'
-
- return self.results
-
- def create_or_update(self, results):
- parameters = self.nsg_models.NetworkSecurityGroup()
- if results.get('rules'):
- parameters.security_rules = []
- for rule in results.get('rules'):
- parameters.security_rules.append(create_rule_instance(self, rule))
- if results.get('default_rules'):
- parameters.default_security_rules = []
- for rule in results.get('default_rules'):
- parameters.default_security_rules.append(create_rule_instance(self, rule))
- parameters.tags = results.get('tags')
- parameters.location = results.get('location')
-
- try:
- poller = self.network_client.network_security_groups.create_or_update(resource_group_name=self.resource_group,
- network_security_group_name=self.name,
- parameters=parameters)
- result = self.get_poller_result(poller)
- except CloudError as exc:
- self.fail("Error creating/updating security group {0} - {1}".format(self.name, str(exc)))
- return create_network_security_group_dict(result)
-
- def delete(self):
- try:
- poller = self.network_client.network_security_groups.delete(resource_group_name=self.resource_group, network_security_group_name=self.name)
- result = self.get_poller_result(poller)
- except CloudError as exc:
- self.fail("Error deleting security group {0} - {1}".format(self.name, str(exc)))
-
- return result
-
- def convert_asg_to_id(self, rule):
- def convert_to_id(rule, key):
- if rule.get(key):
- ids = []
- for p in rule.get(key):
- if isinstance(p, dict):
- ids.append("/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationSecurityGroups/{2}".format(
- self.subscription_id, p.get('resource_group'), p.get('name')))
- elif isinstance(p, str):
- if is_valid_resource_id(p):
- ids.append(p)
- else:
- ids.append("/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationSecurityGroups/{2}".format(
- self.subscription_id, self.resource_group, p))
- rule[key] = ids
- convert_to_id(rule, 'source_application_security_groups')
- convert_to_id(rule, 'destination_application_security_groups')
-
-
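A hedged sketch of the expansion convert_asg_to_id performs: bare names and resource_group/name dicts are turned into full applicationSecurityGroups resource IDs, while values that already look like resource IDs pass through unchanged. The subscription and resource-group values below are placeholders, and the prefix check stands in for msrestazure's is_valid_resource_id.

```python
# Illustrative only; the real method uses msrestazure.tools.is_valid_resource_id
# and the module's subscription_id / resource_group.
ASG_ID = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/applicationSecurityGroups/{2}"


def expand_asg(value, subscription_id, default_rg):
    if isinstance(value, dict):                            # {'resource_group': ..., 'name': ...}
        return ASG_ID.format(subscription_id, value['resource_group'], value['name'])
    if isinstance(value, str) and value.startswith('/subscriptions/'):
        return value                                       # already a full resource ID
    return ASG_ID.format(subscription_id, default_rg, value)


print(expand_asg('web-asg', '00000000-0000-0000-0000-000000000000', 'myResourceGroup'))
```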
-def main():
- AzureRMSecurityGroup()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_securitygroup_info.py b/lib/ansible/modules/cloud/azure/azure_rm_securitygroup_info.py
deleted file mode 100644
index e9acb5266b..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_securitygroup_info.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_securitygroup_info
-
-version_added: "2.9"
-
-short_description: Get security group facts
-
-description:
- - Get facts for a specific security group or all security groups within a resource group.
-
-options:
- name:
- description:
- - Only show results for a specific security group.
- resource_group:
- description:
- - Name of the resource group to use.
- required: true
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Chris Houseknecht (@chouseknecht)
- - Matt Davis (@nitzmahone)
-
-'''
-
-EXAMPLES = '''
- - name: Get facts for one security group
- azure_rm_securitygroup_info:
- resource_group: myResourceGroup
- name: secgroup001
-
- - name: Get facts for all security groups
- azure_rm_securitygroup_info:
- resource_group: myResourceGroup
-
-'''
-
-RETURN = '''
-securitygroups:
- description:
- - List containing security group dicts.
- returned: always
- type: complex
- contains:
- etag:
- description:
- - A unique read-only string that changes whenever the resource is updated.
- returned: always
- type: str
- sample: 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"'
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/secgroup001"
- location:
- description:
- - Resource location.
- returned: always
- type: str
- sample: "eastus2"
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: "secgroup001"
- properties:
- description:
- - List of security group's properties.
- returned: always
- type: dict
- sample: {
- "defaultSecurityRules": [
- {
- "etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/AllowVnetInBound",
- "name": "AllowVnetInBound",
- "properties": {
- "access": "Allow",
- "description": "Allow inbound traffic from all VMs in VNET",
- "destinationAddressPrefix": "VirtualNetwork",
- "destinationPortRange": "*",
- "direction": "Inbound",
- "priority": 65000,
- "protocol": "*",
- "provisioningState": "Succeeded",
- "sourceAddressPrefix": "VirtualNetwork",
- "sourcePortRange": "*"
- }
- },
- {
- "etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/AllowAzureLoadBalancerInBound",
- "name": "AllowAzureLoadBalancerInBound",
- "properties": {
- "access": "Allow",
- "description": "Allow inbound traffic from azure load balancer",
- "destinationAddressPrefix": "*",
- "destinationPortRange": "*",
- "direction": "Inbound",
- "priority": 65001,
- "protocol": "*",
- "provisioningState": "Succeeded",
- "sourceAddressPrefix": "AzureLoadBalancer",
- "sourcePortRange": "*"
- }
- },
- {
- "etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/DenyAllInBound",
- "name": "DenyAllInBound",
- "properties": {
- "access": "Deny",
- "description": "Deny all inbound traffic",
- "destinationAddressPrefix": "*",
- "destinationPortRange": "*",
- "direction": "Inbound",
- "priority": 65500,
- "protocol": "*",
- "provisioningState": "Succeeded",
- "sourceAddressPrefix": "*",
- "sourcePortRange": "*"
- }
- },
- {
- "etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/AllowVnetOutBound",
- "name": "AllowVnetOutBound",
- "properties": {
- "access": "Allow",
- "description": "Allow outbound traffic from all VMs to all VMs in VNET",
- "destinationAddressPrefix": "VirtualNetwork",
- "destinationPortRange": "*",
- "direction": "Outbound",
- "priority": 65000,
- "protocol": "*",
- "provisioningState": "Succeeded",
- "sourceAddressPrefix": "VirtualNetwork",
- "sourcePortRange": "*"
- }
- },
- {
- "etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/AllowInternetOutBound",
- "name": "AllowInternetOutBound",
- "properties": {
- "access": "Allow",
- "description": "Allow outbound traffic from all VMs to Internet",
- "destinationAddressPrefix": "Internet",
- "destinationPortRange": "*",
- "direction": "Outbound",
- "priority": 65001,
- "protocol": "*",
- "provisioningState": "Succeeded",
- "sourceAddressPrefix": "*",
- "sourcePortRange": "*"
- }
- },
- {
- "etag": 'W/"d036f4d7-d977-429a-a8c6-879bc2523399"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/secgroup001/defaultSecurityRules/DenyAllOutBound",
- "name": "DenyAllOutBound",
- "properties": {
- "access": "Deny",
- "description": "Deny all outbound traffic",
- "destinationAddressPrefix": "*",
- "destinationPortRange": "*",
- "direction": "Outbound",
- "priority": 65500,
- "protocol": "*",
- "provisioningState": "Succeeded",
- "sourceAddressPrefix": "*",
- "sourcePortRange": "*"
- }
- }
- ],
- "networkInterfaces": [
- {
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkInterfaces/nic004"
- }
- ],
- "provisioningState": "Succeeded",
- "resourceGuid": "ebd00afa-5dc8-446f-810a-50dd6f671588",
- "securityRules": []
- }
- tags:
- description:
- - Tags to assign to the security group.
- returned: always
- type: dict
- sample: { 'tag': 'value' }
- type:
- description:
- - Type of the resource.
- returned: always
- type: str
- sample: "Microsoft.Network/networkSecurityGroups"
-
-''' # NOQA
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-
-AZURE_OBJECT_CLASS = 'NetworkSecurityGroup'
-
-
-class AzureRMSecurityGroupInfo(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str'),
- resource_group=dict(required=True, type='str'),
- tags=dict(type='list'),
- )
-
- self.results = dict(
- changed=False,
- )
-
- self.name = None
- self.resource_group = None
- self.tags = None
-
- super(AzureRMSecurityGroupInfo, self).__init__(self.module_arg_spec,
- supports_tags=False,
- facts_module=True)
-
- def exec_module(self, **kwargs):
-
- is_old_facts = self.module._name == 'azure_rm_securitygroup_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_securitygroup_facts' module has been renamed to 'azure_rm_securitygroup_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name is not None:
- info = self.get_item()
- else:
- info = self.list_items()
-
- if is_old_facts:
- self.results['ansible_facts'] = {
- 'azure_securitygroups': info
- }
- self.results['securitygroups'] = info
-
- return self.results
-
- def get_item(self):
- self.log('Get properties for {0}'.format(self.name))
- item = None
- result = []
-
- try:
- item = self.network_client.network_security_groups.get(self.resource_group, self.name)
- except CloudError:
- pass
-
- if item and self.has_tags(item.tags, self.tags):
- grp = self.serialize_obj(item, AZURE_OBJECT_CLASS)
- grp['name'] = item.name
- result = [grp]
-
- return result
-
- def list_items(self):
- self.log('List all items')
- try:
- response = self.network_client.network_security_groups.list(self.resource_group)
- except Exception as exc:
- self.fail("Error listing all items - {0}".format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- grp = self.serialize_obj(item, AZURE_OBJECT_CLASS)
- grp['name'] = item.name
- results.append(grp)
- return results
-
-
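The tags option filters results client-side through the base class's has_tags helper; a rough stand-in for that 'key' / 'key:value' matching (not the exact AzureRMModuleBase implementation) could look like this.

```python
# Simplified stand-in for the base class's tag filter; illustrative only.
def matches_tags(resource_tags, wanted):
    resource_tags = resource_tags or {}
    for entry in wanted or []:
        key, _, value = entry.partition(':')
        if key not in resource_tags:
            return False
        if value and resource_tags[key] != value:
            return False
    return True


print(matches_tags({'env': 'prod', 'owner': 'ops'}, ['env:prod', 'owner']))  # True
print(matches_tags({'env': 'dev'}, ['env:prod']))                            # False
```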
-def main():
- AzureRMSecurityGroupInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_servicebus.py b/lib/ansible/modules/cloud/azure/azure_rm_servicebus.py
deleted file mode 100644
index 2975103a41..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_servicebus.py
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_servicebus
-version_added: "2.8"
-short_description: Manage Azure Service Bus
-description:
-    - Create, update or delete an Azure Service Bus namespace.
-options:
- resource_group:
- description:
- - Name of resource group.
- required: true
- name:
- description:
- - Name of the servicebus namespace.
- required: true
- state:
- description:
-            - Assert the state of the servicebus. Use C(present) to create or update and use C(absent) to delete.
- default: present
- choices:
- - absent
- - present
- location:
- description:
- - The servicebus's location.
- sku:
- description:
- - Namespace SKU.
- choices:
- - standard
- - basic
- - premium
- default: standard
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
-- name: Create a namespace
- azure_rm_servicebus:
- name: deadbeef
- location: eastus
-'''
-RETURN = '''
-id:
- description:
-        - Resource ID of the service bus namespace.
- returned: success
- type: str
- sample: "/subscriptions/xxx...xxx/resourceGroups/myResourceGroup/providers/Microsoft.ServiceBus/namespaces/myServicebus"
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
-from ansible.module_utils._text import to_native
-from datetime import datetime, timedelta
-
-
-class AzureRMServiceBus(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- location=dict(type='str'),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- sku=dict(type='str', choices=['basic', 'standard', 'premium'], default='standard')
- )
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.sku = None
- self.location = None
-
- self.results = dict(
- changed=False,
- id=None
- )
-
- super(AzureRMServiceBus, self).__init__(self.module_arg_spec,
- supports_check_mode=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()):
- setattr(self, key, kwargs[key])
-
- changed = False
-
- if not self.location:
- resource_group = self.get_resource_group(self.resource_group)
- self.location = resource_group.location
-
- original = self.get()
- if self.state == 'present' and not original:
- self.check_name()
- changed = True
- if not self.check_mode:
- original = self.create()
- elif self.state == 'absent' and original:
- changed = True
- original = None
- if not self.check_mode:
- self.delete()
- self.results['deleted'] = True
-
- if original:
- self.results = self.to_dict(original)
- self.results['changed'] = changed
- return self.results
-
- def check_name(self):
- try:
- check_name = self.servicebus_client.namespaces.check_name_availability_method(self.name)
- if not check_name or not check_name.name_available:
- self.fail("Error creating namespace {0} - {1}".format(self.name, check_name.message or str(check_name)))
- except Exception as exc:
- self.fail("Error creating namespace {0} - {1}".format(self.name, exc.message or str(exc)))
-
- def create(self):
-        self.log('Cannot find namespace, creating one')
- try:
- sku = self.servicebus_models.SBSku(name=str.capitalize(self.sku))
- poller = self.servicebus_client.namespaces.create_or_update(self.resource_group,
- self.name,
- self.servicebus_models.SBNamespace(location=self.location,
- sku=sku))
- ns = self.get_poller_result(poller)
- except Exception as exc:
- self.fail('Error creating namespace {0} - {1}'.format(self.name, str(exc.inner_exception) or str(exc)))
- return ns
-
- def delete(self):
- try:
- self.servicebus_client.namespaces.delete(self.resource_group, self.name)
- return True
- except Exception as exc:
- self.fail("Error deleting route {0} - {1}".format(self.name, str(exc)))
-
- def get(self):
- try:
- return self.servicebus_client.namespaces.get(self.resource_group, self.name)
- except Exception:
- return None
-
- def to_dict(self, instance):
- result = dict()
- attribute_map = self.servicebus_models.SBNamespace._attribute_map
- for attribute in attribute_map.keys():
- value = getattr(instance, attribute)
- if not value:
- continue
- if isinstance(value, self.servicebus_models.SBSku):
- result[attribute] = value.name.lower()
- elif isinstance(value, datetime):
- result[attribute] = str(value)
- elif isinstance(value, str):
- result[attribute] = to_native(value)
- elif attribute == 'max_size_in_megabytes':
- result['max_size_in_mb'] = value
- else:
- result[attribute] = value
- return result
-
-
-def is_valid_timedelta(value):
- if value == timedelta(10675199, 10085, 477581):
- return None
- return value
-
-
-def main():
- AzureRMServiceBus()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_servicebus_info.py b/lib/ansible/modules/cloud/azure/azure_rm_servicebus_info.py
deleted file mode 100644
index 7e91e40244..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_servicebus_info.py
+++ /dev/null
@@ -1,584 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_servicebus_info
-
-version_added: "2.9"
-
-short_description: Get servicebus facts
-
-description:
-    - Get facts for a specific servicebus or all servicebus resources in a resource group or subscription.
-
-options:
- name:
- description:
- - Limit results to a specific servicebus.
- resource_group:
- description:
- - Limit results in a specific resource group.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- namespace:
- description:
- - Servicebus namespace name.
- - A namespace is a scoping container for all messaging components.
- - Multiple queues and topics can reside within a single namespace, and namespaces often serve as application containers.
- - Required when I(type=namespace).
- type:
- description:
- - Type of the resource.
- choices:
- - namespace
- - queue
- - topic
- - subscription
- topic:
- description:
- - Topic name.
- - Required when I(type=subscription).
- show_sas_policies:
- description:
- - Whether to show the SAS policies.
-            - Not supported when I(type=subscription).
-            - Note that enabling this option issues two additional HTTP calls for each resource, which adds network overhead.
- type: bool
-extends_documentation_fragment:
- - azure
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
-- name: Get all namespaces under a resource group
- azure_rm_servicebus_info:
- resource_group: myResourceGroup
- type: namespace
-
-- name: Get all topics under a namespace
- azure_rm_servicebus_info:
- resource_group: myResourceGroup
- namespace: bar
- type: topic
-
-- name: Get a single queue with SAS policies
- azure_rm_servicebus_info:
- resource_group: myResourceGroup
- namespace: bar
- type: queue
- name: sbqueue
- show_sas_policies: true
-
-- name: Get all subscriptions under a resource group
- azure_rm_servicebus_info:
- resource_group: myResourceGroup
- type: subscription
- namespace: bar
- topic: sbtopic
-'''
-RETURN = '''
-servicebuses:
- description:
- - List of servicebus dicts.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/foo/providers/Microsoft.ServiceBus/
- namespaces/bar/topics/baz/subscriptions/qux"
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: qux
- location:
- description:
- - The Geo-location where the resource lives.
- returned: always
- type: str
- sample: eastus
- namespace:
- description:
-                - I(namespace) name of the C(queue), C(topic), or C(subscription).
- returned: always
- type: str
- sample: bar
- topic:
- description:
- - Topic name of a subscription.
- returned: always
- type: str
- sample: baz
- tags:
- description:
- - Resource tags.
- returned: always
- type: dict
- sample: {env: sandbox}
- sku:
- description:
- - Properties of namespace's SKU.
- returned: always
- type: str
- sample: Standard
- provisioning_state:
- description:
- - Provisioning state of the namespace.
- returned: always
- type: str
- sample: Succeeded
- service_bus_endpoint:
- description:
- - Endpoint you can use to perform Service Bus operations.
- returned: always
- type: str
- sample: "https://bar.servicebus.windows.net:443/"
- metric_id:
- description:
- - Identifier for Azure Insights metrics of namespace.
- returned: always
- type: str
- sample: "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX:bar"
- type:
- description:
- - Resource type.
- - Namespace is a scoping container for all messaging components.
- - Queue enables you to store messages until the receiving application is available to receive and process them.
- - Topic and subscriptions enable 1:n relationships between publishers and subscribers.
- returned: always
- type: str
- sample: "Microsoft.ServiceBus/Namespaces/Topics"
- created_at:
- description:
- - Exact time the message was created.
- returned: always
- type: str
- sample: "2019-01-25 02:46:55.543953+00:00"
- updated_at:
- description:
- - The exact time the message was updated.
- returned: always
- type: str
- sample: "2019-01-25 02:46:55.543953+00:00"
- accessed_at:
- description:
- - Last time the message was sent, or a request was received for this topic.
- returned: always
- type: str
- sample: "2019-01-25 02:46:55.543953+00:00"
- subscription_count:
- description:
- - Number of subscriptions under a topic.
- returned: always
- type: int
- sample: 1
- count_details:
- description:
- - Message count details.
- returned: always
- type: complex
- contains:
- active_message_count:
- description:
- - Number of active messages in the C(queue), C(topic), or C(subscription).
- returned: always
- type: int
- sample: 0
- dead_letter_message_count:
- description:
- - Number of messages that are dead lettered.
- returned: always
- type: int
- sample: 0
- scheduled_message_count:
- description:
- - Number of scheduled messages.
- returned: always
- type: int
- sample: 0
- transfer_message_count:
- description:
- - Number of messages transferred to another C(queue), C(topic), or C(subscription).
- returned: always
- type: int
- sample: 0
- transfer_dead_letter_message_count:
- description:
- - Number of messages transferred into dead letters.
- returned: always
- type: int
- sample: 0
- support_ordering:
- description:
- - Value that indicates whether the C(topic) supports ordering.
- returned: always
- type: bool
- sample: true
- status:
- description:
- - The status of a messaging entity.
- returned: always
- type: str
- sample: active
- requires_session:
- description:
- - A value that indicates whether the C(queue) or C(topic) supports the concept of sessions.
- returned: always
- type: bool
- sample: true
- requires_duplicate_detection:
- description:
- - A value indicating if this C(queue) or C(topic) requires duplicate detection.
- returned: always
- type: bool
- sample: true
- max_size_in_mb:
- description:
- - Maximum size of the C(queue) or C(topic) in megabytes, which is the size of the memory allocated for the C(topic).
- returned: always
- type: int
- sample: 5120
- max_delivery_count:
- description:
- - The maximum delivery count.
- - A message is automatically deadlettered after this number of deliveries.
- returned: always
- type: int
- sample: 10
- lock_duration_in_seconds:
- description:
- - ISO 8601 timespan duration of a peek-lock.
- - The amount of time that the message is locked for other receivers.
- - The maximum value for LockDuration is 5 minutes.
- returned: always
- type: int
- sample: 60
- forward_to:
- description:
- - C(queue) or C(topic) name to forward the messages.
- returned: always
- type: str
- sample: quux
- forward_dead_lettered_messages_to:
- description:
- - C(queue) or C(topic) name to forward the Dead Letter message.
- returned: always
- type: str
- sample: corge
- enable_partitioning:
- description:
-                - Value that indicates whether partitioning of the C(queue) or C(topic) across multiple message brokers is enabled.
- returned: always
- type: bool
- sample: true
- enable_express:
- description:
- - Value that indicates whether Express Entities are enabled.
- - An express topic holds a message in memory temporarily before writing it to persistent storage.
- returned: always
- type: bool
- sample: true
- enable_batched_operations:
- description:
- - Value that indicates whether server-side batched operations are enabled.
- returned: always
- type: bool
- sample: true
- duplicate_detection_time_in_seconds:
- description:
- - ISO 8601 timeSpan structure that defines the duration of the duplicate detection history.
- returned: always
- type: int
- sample: 600
- default_message_time_to_live_seconds:
- description:
-                - ISO 8601 default message time-to-live value.
- - This is the duration after which the message expires, starting from when the message is sent to Service Bus.
- - This is the default value used when TimeToLive is not set on a message itself.
- returned: always
- type: int
- sample: 0
- dead_lettering_on_message_expiration:
- description:
- - A value that indicates whether this C(queue) or C(topic) has dead letter support when a message expires.
- returned: always
- type: int
- sample: 0
- dead_lettering_on_filter_evaluation_exceptions:
- description:
- - Value that indicates whether a subscription has dead letter support on filter evaluation exceptions.
- returned: always
- type: int
- sample: 0
- auto_delete_on_idle_in_seconds:
- description:
-                - ISO 8601 timeSpan idle interval after which the queue or topic is automatically deleted.
- - The minimum duration is 5 minutes.
- returned: always
- type: int
- sample: true
- size_in_bytes:
- description:
- - The size of the C(queue) or C(topic) in bytes.
- returned: always
- type: int
- sample: 0
- message_count:
- description:
- - Number of messages.
- returned: always
- type: int
- sample: 10
- sas_policies:
- description:
- - Dict of SAS policies.
-                - Not returned unless I(show_sas_policies) is set.
- returned: always
- type: dict
- sample: {
- "testpolicy1": {
- "id": "/subscriptions/XXXXXXXX-XXXX-XXXX-XXXXXXXXXXXX/resourceGroups/
- foo/providers/Microsoft.ServiceBus/namespaces/bar/queues/qux/authorizationRules/testpolicy1",
- "keys": {
- "key_name": "testpolicy1",
- "primary_connection_string": "Endpoint=sb://bar.servicebus.windows.net/;
- SharedAccessKeyName=testpolicy1;SharedAccessKey=XXXXXXXXXXXXXXXXX;EntityPath=qux",
- "primary_key": "XXXXXXXXXXXXXXXXX",
- "secondary_connection_string": "Endpoint=sb://bar.servicebus.windows.net/;
- SharedAccessKeyName=testpolicy1;SharedAccessKey=XXXXXXXXXXXXXXX;EntityPath=qux",
- "secondary_key": "XXXXXXXXXXXXXXX"
- },
- "name": "testpolicy1",
- "rights": "listen_send",
- "type": "Microsoft.ServiceBus/Namespaces/Queues/AuthorizationRules"
- }
- }
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict
-from ansible.module_utils.common.dict_transformations import _camel_to_snake
-from ansible.module_utils._text import to_native
-from datetime import datetime, timedelta
-
-duration_spec_map = dict(
- default_message_time_to_live='default_message_time_to_live_seconds',
- duplicate_detection_history_time_window='duplicate_detection_time_in_seconds',
- auto_delete_on_idle='auto_delete_on_idle_in_seconds',
- lock_duration='lock_duration_in_seconds'
-)
-
-
-def is_valid_timedelta(value):
- if value == timedelta(10675199, 10085, 477581):
- return None
- return value
-
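The sentinel timedelta(10675199, 10085, 477581) corresponds to .NET's TimeSpan.MaxValue, which the Service Bus API reports for unset durations; is_valid_timedelta filters it out so that only real durations are converted to seconds via duration_spec_map. A small sketch of that conversion, with illustrative values:

```python
from datetime import timedelta

# Sketch of the duration handling; values are illustrative.
TIMESPAN_MAX = timedelta(10675199, 10085, 477581)   # .NET TimeSpan.MaxValue, i.e. "not set"


def duration_to_seconds(value):
    if value is None or value == TIMESPAN_MAX:
        return None
    return int(value.total_seconds())


print(duration_to_seconds(timedelta(minutes=1)))     # 60, e.g. lock_duration_in_seconds
print(duration_to_seconds(TIMESPAN_MAX))             # None, reported as unset
```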
-
-class AzureRMServiceBusInfo(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str'),
- resource_group=dict(type='str'),
- tags=dict(type='list'),
- type=dict(type='str', required=True, choices=['namespace', 'topic', 'queue', 'subscription']),
- namespace=dict(type='str'),
- topic=dict(type='str'),
- show_sas_policies=dict(type='bool')
- )
-
- required_if = [
- ('type', 'subscription', ['topic', 'resource_group', 'namespace']),
- ('type', 'topic', ['resource_group', 'namespace']),
- ('type', 'queue', ['resource_group', 'namespace'])
- ]
-
- self.results = dict(
- changed=False,
- servicebuses=[]
- )
-
- self.name = None
- self.resource_group = None
- self.tags = None
- self.type = None
- self.namespace = None
- self.topic = None
- self.show_sas_policies = None
-
- super(AzureRMServiceBusInfo, self).__init__(self.module_arg_spec,
- supports_tags=False,
- required_if=required_if,
- facts_module=True)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_servicebus_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_servicebus_facts' module has been renamed to 'azure_rm_servicebus_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- response = []
- if self.name:
- response = self.get_item()
- elif self.resource_group:
- response = self.list_items()
- else:
- response = self.list_all_items()
-
- self.results['servicebuses'] = [self.instance_to_dict(x) for x in response]
- return self.results
-
- def instance_to_dict(self, instance):
- result = dict()
- instance_type = getattr(self.servicebus_models, 'SB{0}'.format(str.capitalize(self.type)))
- attribute_map = instance_type._attribute_map
- for attribute in attribute_map.keys():
- value = getattr(instance, attribute)
- if attribute_map[attribute]['type'] == 'duration':
- if is_valid_timedelta(value):
- key = duration_spec_map.get(attribute) or attribute
- result[key] = int(value.total_seconds())
- elif attribute == 'status':
- result['status'] = _camel_to_snake(value)
- elif isinstance(value, self.servicebus_models.MessageCountDetails):
- result[attribute] = value.as_dict()
- elif isinstance(value, self.servicebus_models.SBSku):
- result[attribute] = value.name.lower()
- elif isinstance(value, datetime):
- result[attribute] = str(value)
- elif isinstance(value, str):
- result[attribute] = to_native(value)
- elif attribute == 'max_size_in_megabytes':
- result['max_size_in_mb'] = value
- else:
- result[attribute] = value
- if self.show_sas_policies and self.type != 'subscription':
- policies = self.get_auth_rules()
- for name in policies.keys():
- policies[name]['keys'] = self.get_sas_key(name)
- result['sas_policies'] = policies
- if self.namespace:
- result['namespace'] = self.namespace
- if self.topic:
- result['topic'] = self.topic
- return result
-
- def _get_client(self):
- return getattr(self.servicebus_client, '{0}s'.format(self.type))
-
- def get_item(self):
- try:
- client = self._get_client()
- if self.type == 'namespace':
- item = client.get(self.resource_group, self.name)
- return [item] if self.has_tags(item.tags, self.tags) else []
- elif self.type == 'subscription':
- return [client.get(self.resource_group, self.namespace, self.topic, self.name)]
- else:
- return [client.get(self.resource_group, self.namespace, self.name)]
- except Exception:
- pass
- return []
-
- def list_items(self):
- try:
- client = self._get_client()
- if self.type == 'namespace':
- response = client.list_by_resource_group(self.resource_group)
- return [x for x in response if self.has_tags(x.tags, self.tags)]
- elif self.type == 'subscription':
- return client.list_by_topic(self.resource_group, self.namespace, self.topic)
- else:
- return client.list_by_namespace(self.resource_group, self.namespace)
- except CloudError as exc:
- self.fail("Failed to list items - {0}".format(str(exc)))
- return []
-
- def list_all_items(self):
- self.log("List all items in subscription")
- try:
- if self.type != 'namespace':
- return []
- response = self.servicebus_client.namespaces.list()
- return [x for x in response if self.has_tags(x.tags, self.tags)]
- except CloudError as exc:
- self.fail("Failed to list all items - {0}".format(str(exc)))
- return []
-
- def get_auth_rules(self):
- result = dict()
- try:
- client = self._get_client()
- if self.type == 'namespace':
- rules = client.list_authorization_rules(self.resource_group, self.name)
- else:
- rules = client.list_authorization_rules(self.resource_group, self.namespace, self.name)
- while True:
- rule = rules.next()
- result[rule.name] = self.policy_to_dict(rule)
- except StopIteration:
- pass
- except Exception as exc:
- self.fail('Error when getting SAS policies for {0} {1}: {2}'.format(self.type, self.name, exc.message or str(exc)))
- return result
-
- def get_sas_key(self, name):
- try:
- client = self._get_client()
- if self.type == 'namespace':
- return client.list_keys(self.resource_group, self.name, name).as_dict()
- else:
- return client.list_keys(self.resource_group, self.namespace, self.name, name).as_dict()
- except Exception as exc:
- self.fail('Error when getting SAS policy {0}\'s key - {1}'.format(name, exc.message or str(exc)))
- return None
-
- def policy_to_dict(self, rule):
- result = rule.as_dict()
- rights = result['rights']
- if 'Manage' in rights:
- result['rights'] = 'manage'
- elif 'Listen' in rights and 'Send' in rights:
- result['rights'] = 'listen_send'
- else:
- result['rights'] = rights[0].lower()
- return result
-
-
-def main():
- AzureRMServiceBusInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_servicebusqueue.py b/lib/ansible/modules/cloud/azure/azure_rm_servicebusqueue.py
deleted file mode 100644
index e15b3fc50a..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_servicebusqueue.py
+++ /dev/null
@@ -1,339 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_servicebusqueue
-version_added: "2.8"
-short_description: Manage Azure Service Bus queue
-description:
- - Create, update or delete an Azure Service Bus queue.
-options:
- resource_group:
- description:
- - Name of resource group.
- required: true
- name:
- description:
- - Name of the queue.
- required: true
- namespace:
- description:
- - Servicebus namespace name.
- - A namespace is a scoping container for all messaging components.
- - Multiple queues and topics can reside within a single namespace, and namespaces often serve as application containers.
- required: true
- state:
- description:
- - Assert the state of the queue. Use C(present) to create or update and use C(absent) to delete.
- default: present
- choices:
- - absent
- - present
- auto_delete_on_idle_in_seconds:
- description:
-            - Idle interval after which a queue is automatically deleted.
- - The minimum duration is 5 minutes.
- type: int
- dead_lettering_on_message_expiration:
- description:
- - A value that indicates whether a queue has dead letter support when a message expires.
- type: bool
- default_message_time_to_live_seconds:
- description:
- - Default message timespan to live value.
- - This is the duration after which the message expires, starting from when the message is sent to Service Bus.
- - This is the default value used when TimeToLive is not set on a message itself.
- type: int
- enable_batched_operations:
- description:
- - Value that indicates whether server-side batched operations are enabled.
- type: bool
- enable_express:
- description:
- - Value that indicates whether Express Entities are enabled.
- - An express topic or queue holds a message in memory temporarily before writing it to persistent storage.
- type: bool
- enable_partitioning:
- description:
- - A value that indicates whether the topic or queue is to be partitioned across multiple message brokers.
- type: bool
- forward_dead_lettered_messages_to:
- description:
- - Queue or topic name to forward the Dead Letter message for a queue.
- forward_to:
- description:
- - Queue or topic name to forward the messages for a queue.
- lock_duration_in_seconds:
- description:
- - Timespan duration of a peek-lock.
- - The amount of time that the message is locked for other receivers.
- - The maximum value for LockDuration is 5 minutes.
- type: int
- max_delivery_count:
- description:
- - The maximum delivery count.
- - A message is automatically deadlettered after this number of deliveries.
- type: int
- max_size_in_mb:
- description:
- - The maximum size of the queue in megabytes, which is the size of memory allocated for the queue.
- type: int
- requires_duplicate_detection:
- description:
- - A value indicating if this queue or topic requires duplicate detection.
- type: bool
- duplicate_detection_time_in_seconds:
- description:
- - TimeSpan structure that defines the duration of the duplicate detection history.
- type: int
- requires_session:
- description:
- - A value that indicates whether the queue supports the concept of sessions.
- type: bool
- status:
- description:
- - Status of the entity.
- choices:
- - active
- - disabled
- - send_disabled
- - receive_disabled
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
-- name: Create a queue
- azure_rm_servicebusqueue:
- name: subqueue
- resource_group: myResourceGroup
- namespace: bar
- duplicate_detection_time_in_seconds: 600
-'''
-RETURN = '''
-id:
- description:
-        - Resource ID of the queue.
- returned: success
- type: str
- sample: "/subscriptions/xxx...xxx/resourceGroups/v-xisuRG/providers/Microsoft.ServiceBus/namespaces/nsb57dc9561/queues/queueb57dc9561"
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
-from ansible.module_utils._text import to_native
-from datetime import datetime, timedelta
-
-
-duration_spec_map = dict(
- default_message_time_to_live='default_message_time_to_live_seconds',
- duplicate_detection_history_time_window='duplicate_detection_time_in_seconds',
- auto_delete_on_idle='auto_delete_on_idle_in_seconds',
- lock_duration='lock_duration_in_seconds'
-)
-
-
-sas_policy_spec = dict(
- state=dict(type='str', default='present', choices=['present', 'absent']),
- name=dict(type='str', required=True),
- regenerate_key=dict(type='bool'),
- rights=dict(type='str', choices=['manage', 'listen', 'send', 'listen_send'])
-)
-
-
-class AzureRMServiceBusQueue(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- namespace=dict(type='str', required=True),
- auto_delete_on_idle_in_seconds=dict(type='int'),
- dead_lettering_on_message_expiration=dict(type='bool'),
- default_message_time_to_live_seconds=dict(type='int'),
- duplicate_detection_time_in_seconds=dict(type='int'),
- enable_batched_operations=dict(type='bool'),
- enable_express=dict(type='bool'),
- enable_partitioning=dict(type='bool'),
- forward_dead_lettered_messages_to=dict(type='str'),
- forward_to=dict(type='str'),
- lock_duration_in_seconds=dict(type='int'),
- max_delivery_count=dict(type='int'),
- max_size_in_mb=dict(type='int'),
- requires_duplicate_detection=dict(type='bool'),
- requires_session=dict(type='bool'),
- status=dict(type='str',
- choices=['active', 'disabled', 'send_disabled', 'receive_disabled'])
- )
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.namespace = None
- self.location = None
- self.type = None
- self.subscription_topic_name = None
- self.auto_delete_on_idle_in_seconds = None
- self.dead_lettering_on_message_expiration = None
- self.default_message_time_to_live_seconds = None
- self.enable_batched_operations = None
- self.enable_express = None
- self.enable_partitioning = None
- self.forward_dead_lettered_messages_to = None
- self.forward_to = None
- self.lock_duration_in_seconds = None
- self.max_delivery_count = None
- self.max_size_in_mb = None
- self.requires_duplicate_detection = None
- self.status = None
-
- self.results = dict(
- changed=False,
- id=None
- )
-
- super(AzureRMServiceBusQueue, self).__init__(self.module_arg_spec,
- supports_check_mode=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()):
- setattr(self, key, kwargs[key])
-
- changed = False
-
- original = self.get()
- if self.state == 'present':
- # Create the resource instance
- params = dict(
- dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration,
- enable_batched_operations=self.enable_batched_operations,
- enable_express=self.enable_express,
- enable_partitioning=self.enable_partitioning,
- forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to,
- forward_to=self.forward_to,
- max_delivery_count=self.max_delivery_count,
- max_size_in_megabytes=self.max_size_in_mb
- )
- if self.status:
- params['status'] = self.servicebus_models.EntityStatus(str.capitalize(_snake_to_camel(self.status)))
- for k, v in duration_spec_map.items():
- seconds = getattr(self, v)
- if seconds:
- params[k] = timedelta(seconds=seconds)
-
- instance = self.servicebus_models.SBQueue(**params)
- result = original
- if not original:
- changed = True
- result = instance
- else:
- result = original
- attribute_map = set(self.servicebus_models.SBQueue._attribute_map.keys()) - set(self.servicebus_models.SBQueue._validation.keys())
- for attribute in attribute_map:
- value = getattr(instance, attribute)
- if value and value != getattr(original, attribute):
- changed = True
- if changed and not self.check_mode:
- result = self.create_or_update(instance)
- self.results = self.to_dict(result)
- elif original:
- changed = True
- if not self.check_mode:
- self.delete()
- self.results['deleted'] = True
-
- self.results['changed'] = changed
- return self.results
-
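The update check in exec_module diffs only the writable SBQueue attributes: it takes the model's _attribute_map keys, removes the read-only ones listed in _validation, and flags a change whenever a requested (truthy) value differs from the current one. A simplified standalone sketch of the same idea, with made-up field names:

```python
# Simplified stand-in with made-up field names; mirrors the truthy-value comparison above.
READ_ONLY = {'id', 'created_at', 'message_count'}


def needs_update(current, requested):
    for key in set(requested) - READ_ONLY:
        if requested[key] and requested[key] != current.get(key):
            return True
    return False


current = {'max_delivery_count': 10, 'enable_partitioning': False, 'id': 'keep-as-is'}
requested = {'max_delivery_count': 20, 'enable_partitioning': None}   # falsy means "leave unchanged"
print(needs_update(current, requested))   # True: max_delivery_count differs
```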
- def create_or_update(self, param):
- try:
- client = self._get_client()
- return client.create_or_update(self.resource_group, self.namespace, self.name, param)
- except Exception as exc:
- self.fail('Error creating or updating queue {0} - {1}'.format(self.name, str(exc.inner_exception) or str(exc)))
-
- def delete(self):
- try:
- client = self._get_client()
- client.delete(self.resource_group, self.namespace, self.name)
- return True
- except Exception as exc:
- self.fail("Error deleting queue {0} - {1}".format(self.name, str(exc)))
-
- def _get_client(self):
- return self.servicebus_client.queues
-
- def get(self):
- try:
- client = self._get_client()
- return client.get(self.resource_group, self.namespace, self.name)
- except Exception:
- return None
-
- def to_dict(self, instance):
- result = dict()
- attribute_map = self.servicebus_models.SBQueue._attribute_map
- for attribute in attribute_map.keys():
- value = getattr(instance, attribute)
- if not value:
- continue
- if attribute_map[attribute]['type'] == 'duration':
- if is_valid_timedelta(value):
- key = duration_spec_map.get(attribute) or attribute
- result[key] = int(value.total_seconds())
- elif attribute == 'status':
- result['status'] = _camel_to_snake(value)
- elif isinstance(value, self.servicebus_models.MessageCountDetails):
- result[attribute] = value.as_dict()
- elif isinstance(value, self.servicebus_models.SBSku):
- result[attribute] = value.name.lower()
- elif isinstance(value, datetime):
- result[attribute] = str(value)
- elif isinstance(value, str):
- result[attribute] = to_native(value)
- elif attribute == 'max_size_in_megabytes':
- result['max_size_in_mb'] = value
- else:
- result[attribute] = value
- return result
-
-
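-# A duration equal to TimeSpan.MaxValue (about 10675199 days) is the service's way of
-# saying the value was never set, so it is filtered out instead of being reported back.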
-def is_valid_timedelta(value):
- if value == timedelta(10675199, 10085, 477581):
- return None
- return value
-
-
-def main():
- AzureRMServiceBusQueue()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_servicebussaspolicy.py b/lib/ansible/modules/cloud/azure/azure_rm_servicebussaspolicy.py
deleted file mode 100644
index 33957a4602..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_servicebussaspolicy.py
+++ /dev/null
@@ -1,328 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_servicebussaspolicy
-version_added: "2.8"
-short_description: Manage Azure Service Bus SAS policy
-description:
- - Create, update or delete an Azure Service Bus SAS policy.
-options:
- resource_group:
- description:
- - Name of resource group.
- required: true
- name:
- description:
- - Name of the SAS policy.
- required: true
- state:
- description:
-        - Assert the state of the SAS policy. Use C(present) to create or update and C(absent) to delete.
- default: present
- choices:
- - absent
- - present
- namespace:
- description:
-            - Name of the Service Bus namespace.
-            - The SAS policy is managed at the namespace level when neither C(queue) nor C(topic) is set.
-            - When C(queue) or C(topic) is set, the SAS policy is managed for that queue or topic under this namespace.
- required: true
- queue:
- description:
-            - Name of the messaging queue.
-            - Cannot set C(topic) when this field is set.
- topic:
- description:
- - Name of the messaging topic.
-            - Cannot set C(queue) when this field is set.
- regenerate_primary_key:
- description:
- - Regenerate the SAS policy primary key.
- type: bool
- default: False
- regenerate_secondary_key:
- description:
- - Regenerate the SAS policy secondary key.
- type: bool
- default: False
- rights:
- description:
- - Claim rights of the SAS policy.
- required: True
- choices:
- - manage
- - listen
- - send
- - listen_send
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
-- name: Create a namespace
- azure_rm_servicebussaspolicy:
- name: deadbeef
- queue: qux
- namespace: bar
- resource_group: myResourceGroup
- rights: send
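-
-# Additional illustrative example (resource names are placeholders): a namespace-level
-# policy with manage rights whose primary key is regenerated on each run.
-- name: Create a namespace-level SAS policy and regenerate its primary key
-  azure_rm_servicebussaspolicy:
-    name: nspolicy
-    namespace: bar
-    resource_group: myResourceGroup
-    rights: manage
-    regenerate_primary_key: true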
-'''
-RETURN = '''
-id:
- description:
- - Current state of the SAS policy.
-    returned: success
- type: str
- sample: "/subscriptions/xxx...xxx/resourceGroups/myResourceGroup/providers/Microsoft.ServiceBus/
- namespaces/nsb57dc95979/topics/topicb57dc95979/authorizationRules/testpolicy"
-keys:
- description:
- - Key dict of the SAS policy.
-    returned: success
- type: complex
- contains:
- key_name:
- description:
- - Name of the SAS policy.
-            returned: success
- type: str
- sample: testpolicy
- primary_connection_string:
- description:
- - Primary connection string.
-            returned: success
- type: str
- sample: "Endpoint=sb://nsb57dc95979.servicebus.windows.net/;SharedAccessKeyName=testpolicy;
- SharedAccessKey=r+HD3es/9aOOq0XjQtkx5KXROH1MIHDs0WxCgR23gMc=;EntityPath=topicb57dc95979"
- primary_key:
- description:
- - Primary key.
-            returned: success
- type: str
- sample: "r+HD3es/9aOOq0XjQtkx5KXROH1MIHDs0WxCgR23gMc="
- secondary_key:
- description:
- - Secondary key.
-            returned: success
- type: str
- sample: "/EcGztJBv72VD0Dy14bdsxi30rl+pSZMtKcs4KV3JWU="
- secondary_connection_string:
- description:
- - Secondary connection string.
-            returned: success
- type: str
- sample: "Endpoint=sb://nsb57dc95979.servicebus.windows.net/;SharedAccessKeyName=testpolicy;
- SharedAccessKey=/EcGztJBv72VD0Dy14bdsxi30rl+pSZMtKcs4KV3JWU=;EntityPath=topicb57dc95979"
-name:
- description:
- - Name of the SAS policy.
-    returned: success
- type: str
- sample: testpolicy
-rights:
- description:
-        - Privilege of the SAS policy.
-    returned: success
- type: str
- sample: manage
-type:
- description:
- - Type of the SAS policy.
-    returned: success
- type: str
- sample: "Microsoft.ServiceBus/Namespaces/Topics/AuthorizationRules"
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
-from ansible.module_utils._text import to_native
-from datetime import datetime, timedelta
-
-
-class AzureRMServiceBusSASPolicy(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- namespace=dict(type='str', required=True),
- queue=dict(type='str'),
- topic=dict(type='str'),
- regenerate_primary_key=dict(type='bool', default=False),
- regenerate_secondary_key=dict(type='bool', default=False),
- rights=dict(type='str', choices=['manage', 'listen', 'send', 'listen_send'])
- )
-
- mutually_exclusive = [
- ['queue', 'topic']
- ]
-
- required_if = [('state', 'present', ['rights'])]
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.namespace = None
- self.queue = None
- self.topic = None
- self.regenerate_primary_key = None
- self.regenerate_secondary_key = None
- self.rights = None
-
- self.results = dict(
- changed=False,
- id=None
- )
-
- super(AzureRMServiceBusSASPolicy, self).__init__(self.module_arg_spec,
- mutually_exclusive=mutually_exclusive,
- required_if=required_if,
- supports_check_mode=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()):
- setattr(self, key, kwargs[key])
-
- changed = False
-
- policy = self.get_auth_rule()
- if self.state == 'present':
- if not policy: # Create a new one
- changed = True
- if not self.check_mode:
- policy = self.create_sas_policy()
- else:
- changed = changed | self.regenerate_primary_key | self.regenerate_secondary_key
- if self.regenerate_primary_key and not self.check_mode:
- self.regenerate_sas_key('primary')
- if self.regenerate_secondary_key and not self.check_mode:
- self.regenerate_sas_key('secondary')
- self.results = self.policy_to_dict(policy)
- self.results['keys'] = self.get_sas_key()
- elif policy:
- changed = True
- if not self.check_mode:
- self.delete_sas_policy()
-
- self.results['changed'] = changed
- return self.results
-
- def _get_client(self):
- if self.queue:
- return self.servicebus_client.queues
- elif self.topic:
- return self.servicebus_client.topics
- return self.servicebus_client.namespaces
-
- # SAS policy
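-    # The module's friendly 'rights' choices map onto Azure access rights: 'manage' grants
-    # Listen, Send and Manage, 'listen_send' grants Listen and Send, and the remaining
-    # choices grant the single corresponding right.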
- def create_sas_policy(self):
- if self.rights == 'listen_send':
- rights = ['Listen', 'Send']
- elif self.rights == 'manage':
- rights = ['Listen', 'Send', 'Manage']
- else:
- rights = [str.capitalize(self.rights)]
- try:
- client = self._get_client()
- if self.queue or self.topic:
- rule = client.create_or_update_authorization_rule(self.resource_group, self.namespace, self.queue or self.topic, self.name, rights)
- else:
- rule = client.create_or_update_authorization_rule(self.resource_group, self.namespace, self.name, rights)
- return rule
- except Exception as exc:
- self.fail('Error when creating or updating SAS policy {0} - {1}'.format(self.name, exc.message or str(exc)))
- return None
-
- def get_auth_rule(self):
- rule = None
- try:
- client = self._get_client()
- if self.queue or self.topic:
- rule = client.get_authorization_rule(self.resource_group, self.namespace, self.queue or self.topic, self.name)
- else:
- rule = client.get_authorization_rule(self.resource_group, self.namespace, self.name)
- except Exception:
- pass
- return rule
-
- def delete_sas_policy(self):
- try:
- client = self._get_client()
- if self.queue or self.topic:
- client.delete_authorization_rule(self.resource_group, self.namespace, self.queue or self.topic, self.name)
- else:
- client.delete_authorization_rule(self.resource_group, self.namespace, self.name)
- return True
- except Exception as exc:
- self.fail('Error when deleting SAS policy {0} - {1}'.format(self.name, exc.message or str(exc)))
-
- def regenerate_sas_key(self, key_type):
- try:
- client = self._get_client()
- key = str.capitalize(key_type) + 'Key'
- if self.queue or self.topic:
- client.regenerate_keys(self.resource_group, self.namespace, self.queue or self.topic, self.name, key)
- else:
- client.regenerate_keys(self.resource_group, self.namespace, self.name, key)
- except Exception as exc:
- self.fail('Error when generating SAS policy {0}\'s key - {1}'.format(self.name, exc.message or str(exc)))
- return None
-
- def get_sas_key(self):
- try:
- client = self._get_client()
- if self.queue or self.topic:
- return client.list_keys(self.resource_group, self.namespace, self.queue or self.topic, self.name).as_dict()
- else:
- return client.list_keys(self.resource_group, self.namespace, self.name).as_dict()
- except Exception:
- pass
- return None
-
- def policy_to_dict(self, rule):
- result = rule.as_dict()
- rights = result['rights']
- if 'Manage' in rights:
- result['rights'] = 'manage'
- elif 'Listen' in rights and 'Send' in rights:
- result['rights'] = 'listen_send'
- else:
- result['rights'] = rights[0].lower()
- return result
-
-
-def main():
- AzureRMServiceBusSASPolicy()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_servicebustopic.py b/lib/ansible/modules/cloud/azure/azure_rm_servicebustopic.py
deleted file mode 100644
index ea8efa7567..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_servicebustopic.py
+++ /dev/null
@@ -1,302 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_servicebustopic
-version_added: "2.8"
-short_description: Manage Azure Service Bus topic
-description:
-    - Create, update or delete an Azure Service Bus topic.
-options:
- resource_group:
- description:
- - Name of resource group.
- required: true
- name:
- description:
- - Name of the topic.
- required: true
- namespace:
- description:
- - Servicebus namespace name.
- - A namespace is a scoping container for all messaging components.
-            - Multiple topics can reside within a single namespace.
- required: true
- state:
- description:
- - Assert the state of the topic. Use C(present) to create or update and use C(absent) to delete.
- default: present
- choices:
- - absent
- - present
- auto_delete_on_idle_in_seconds:
- description:
-            - Idle time interval after which a topic is automatically deleted.
- - The minimum duration is 5 minutes.
- type: int
- default_message_time_to_live_seconds:
- description:
- - Default message timespan to live value.
- - This is the duration after which the message expires, starting from when the message is sent to Service Bus.
- - This is the default value used when TimeToLive is not set on a message itself.
- type: int
- enable_batched_operations:
- description:
- - Value that indicates whether server-side batched operations are enabled.
- type: bool
- enable_express:
- description:
- - Value that indicates whether Express Entities are enabled.
- - An express topic holds a message in memory temporarily before writing it to persistent storage.
- type: bool
- enable_partitioning:
- description:
- - A value that indicates whether the topic is to be partitioned across multiple message brokers.
- type: bool
- max_size_in_mb:
- description:
- - The maximum size of the topic in megabytes, which is the size of memory allocated for the topic.
- type: int
- requires_duplicate_detection:
- description:
- - A value indicating if this topic requires duplicate detection.
- type: bool
- duplicate_detection_time_in_seconds:
- description:
- - TimeSpan structure that defines the duration of the duplicate detection history.
- type: int
- support_ordering:
- description:
- - Value that indicates whether the topic supports ordering.
- type: bool
- status:
- description:
- - Status of the entity.
- choices:
- - active
- - disabled
- - send_disabled
- - receive_disabled
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
-- name: Create a topic
- azure_rm_servicebustopic:
- name: subtopic
- resource_group: myResourceGroup
- namespace: bar
- duplicate_detection_time_in_seconds: 600
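-
-# Additional illustrative example (names are placeholders): a partitioned topic
-# with an explicit size cap.
-- name: Create a partitioned topic
-  azure_rm_servicebustopic:
-    name: subtopic
-    resource_group: myResourceGroup
-    namespace: bar
-    enable_partitioning: true
-    max_size_in_mb: 5120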
-'''
-RETURN = '''
-id:
- description:
- - Current state of the topic.
- returned: success
- type: str
- sample: "/subscriptions/xxx...xxx/resourceGroups/myResourceGroup/providers/Microsoft.ServiceBus/namespaces/nsb57dc95979/topics/topicb57dc95979"
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
-from ansible.module_utils._text import to_native
-from datetime import datetime, timedelta
-
-
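-# The SDK models expose these properties as durations (timedelta); the module accepts plain
-# integer seconds, and this map drives the conversion in both directions (exec_module turns
-# seconds into timedelta, to_dict turns timedelta back into seconds).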
-duration_spec_map = dict(
- default_message_time_to_live='default_message_time_to_live_seconds',
- duplicate_detection_history_time_window='duplicate_detection_time_in_seconds',
- auto_delete_on_idle='auto_delete_on_idle_in_seconds'
-)
-
-
-sas_policy_spec = dict(
- state=dict(type='str', default='present', choices=['present', 'absent']),
- name=dict(type='str', required=True),
- regenerate_key=dict(type='bool'),
- rights=dict(type='str', choices=['manage', 'listen', 'send', 'listen_send'])
-)
-
-
-class AzureRMServiceBusTopic(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- auto_delete_on_idle_in_seconds=dict(type='int'),
- default_message_time_to_live_seconds=dict(type='int'),
- duplicate_detection_time_in_seconds=dict(type='int'),
- enable_batched_operations=dict(type='bool'),
- enable_express=dict(type='bool'),
- enable_partitioning=dict(type='bool'),
- max_size_in_mb=dict(type='int'),
- name=dict(type='str', required=True),
- namespace=dict(type='str'),
- requires_duplicate_detection=dict(type='bool'),
- resource_group=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- status=dict(type='str',
- choices=['active', 'disabled', 'send_disabled', 'receive_disabled']),
- support_ordering=dict(type='bool')
- )
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.namespace = None
- self.auto_delete_on_idle_in_seconds = None
- self.default_message_time_to_live_seconds = None
- self.enable_batched_operations = None
- self.enable_express = None
- self.enable_partitioning = None
- self.max_size_in_mb = None
- self.requires_duplicate_detection = None
- self.status = None
- self.support_ordering = None
-
- self.results = dict(
- changed=False,
- id=None
- )
-
- super(AzureRMServiceBusTopic, self).__init__(self.module_arg_spec,
- supports_check_mode=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()):
- setattr(self, key, kwargs[key])
-
- changed = False
- original = self.get()
- if self.state == 'present':
- # Create the resource instance
- params = dict(
- enable_batched_operations=self.enable_batched_operations,
- enable_express=self.enable_express,
- enable_partitioning=self.enable_partitioning,
- max_size_in_megabytes=self.max_size_in_mb,
- support_ordering=self.support_ordering
- )
- if self.status:
- params['status'] = self.servicebus_models.EntityStatus(str.capitalize(_snake_to_camel(self.status)))
- for k, v in duration_spec_map.items():
- seconds = getattr(self, v)
- if seconds:
- params[k] = timedelta(seconds=seconds)
-
- instance = self.servicebus_models.SBTopic(**params)
- result = original
- if not original:
- changed = True
- result = instance
- else:
- result = original
- attribute_map = set(self.servicebus_models.SBTopic._attribute_map.keys()) - set(self.servicebus_models.SBTopic._validation.keys())
- for attribute in attribute_map:
- value = getattr(instance, attribute)
- if value and value != getattr(original, attribute):
- changed = True
- if changed and not self.check_mode:
- result = self.create_or_update(instance)
- self.results = self.to_dict(result)
- elif original:
- changed = True
- if not self.check_mode:
- self.delete()
- self.results['deleted'] = True
-
- self.results['changed'] = changed
- return self.results
-
- def create_or_update(self, param):
- try:
- client = self._get_client()
- return client.create_or_update(self.resource_group, self.namespace, self.name, param)
- except Exception as exc:
- self.fail('Error creating or updating topic {0} - {1}'.format(self.name, str(exc.inner_exception) or str(exc)))
-
- def delete(self):
- try:
- client = self._get_client()
- client.delete(self.resource_group, self.namespace, self.name)
- return True
- except Exception as exc:
- self.fail("Error deleting topic {0} - {1}".format(self.name, str(exc)))
-
- def _get_client(self):
- return self.servicebus_client.topics
-
- def get(self):
- try:
- client = self._get_client()
- return client.get(self.resource_group, self.namespace, self.name)
- except Exception:
- return None
-
- def to_dict(self, instance):
- result = dict()
- attribute_map = self.servicebus_models.SBTopic._attribute_map
- for attribute in attribute_map.keys():
- value = getattr(instance, attribute)
- if not value:
- continue
- if attribute_map[attribute]['type'] == 'duration':
- if is_valid_timedelta(value):
- key = duration_spec_map.get(attribute) or attribute
- result[key] = int(value.total_seconds())
- elif attribute == 'status':
- result['status'] = _camel_to_snake(value)
- elif isinstance(value, self.servicebus_models.MessageCountDetails):
- result[attribute] = value.as_dict()
- elif isinstance(value, self.servicebus_models.SBSku):
- result[attribute] = value.name.lower()
- elif isinstance(value, datetime):
- result[attribute] = str(value)
- elif isinstance(value, str):
- result[attribute] = to_native(value)
- elif attribute == 'max_size_in_megabytes':
- result['max_size_in_mb'] = value
- else:
- result[attribute] = value
- return result
-
-
-def is_valid_timedelta(value):
- if value == timedelta(10675199, 10085, 477581):
- return None
- return value
-
-
-def main():
- AzureRMServiceBusTopic()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_servicebustopicsubscription.py b/lib/ansible/modules/cloud/azure/azure_rm_servicebustopicsubscription.py
deleted file mode 100644
index d4430826f4..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_servicebustopicsubscription.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_servicebustopicsubscription
-version_added: "2.8"
-short_description: Manage Azure Service Bus subscription
-description:
-    - Create, update or delete an Azure Service Bus subscription.
-options:
- resource_group:
- description:
- - Name of resource group.
- required: true
- name:
- description:
- - Name of the servicebus subscription.
- required: true
- state:
- description:
- - Assert the state of the servicebus subscription. Use C(present) to create or update and use C(absent) to delete.
- default: present
- choices:
- - absent
- - present
- namespace:
- description:
- - Servicebus namespace name.
- - A namespace is a scoping container for all messaging components.
- - Multiple subscriptions and topics can reside within a single namespace, and namespaces often serve as application containers.
- required: true
- topic:
- description:
-            - Topic name to which the subscription subscribes.
- required: true
- auto_delete_on_idle_in_seconds:
- description:
-            - Idle time interval after which a subscription is automatically deleted.
- - The minimum duration is 5 minutes.
- type: int
- dead_lettering_on_message_expiration:
- description:
- - A value that indicates whether a subscription has dead letter support when a message expires.
- type: bool
- dead_lettering_on_filter_evaluation_exceptions:
- description:
- - Value that indicates whether a subscription has dead letter support on filter evaluation exceptions.
- type: bool
- default_message_time_to_live_seconds:
- description:
- - Default message timespan to live value.
- - This is the duration after which the message expires, starting from when the message is sent to Service Bus.
- - This is the default value used when TimeToLive is not set on a message itself.
- type: int
- enable_batched_operations:
- description:
- - Value that indicates whether server-side batched operations are enabled.
- type: bool
- forward_dead_lettered_messages_to:
- description:
-            - Queue or topic name to forward the dead-lettered messages of a subscription to.
- forward_to:
- description:
-            - Queue or topic name to forward the messages of a subscription to.
- lock_duration_in_seconds:
- description:
- - Timespan duration of a peek-lock.
- - The amount of time that the message is locked for other receivers.
- - The maximum value for LockDuration is 5 minutes.
- type: int
- max_delivery_count:
- description:
-            - The maximum delivery count.
- - A message is automatically deadlettered after this number of deliveries.
- type: int
- requires_session:
- description:
- - A value that indicates whether the subscription supports the concept of sessions.
- type: bool
- duplicate_detection_time_in_seconds:
- description:
- - TimeSpan structure that defines the duration of the duplicate detection history.
- type: int
- status:
- description:
- - Status of the entity.
- choices:
- - active
- - disabled
- - send_disabled
- - receive_disabled
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yuwei Zhou (@yuwzho)
-
-'''
-
-EXAMPLES = '''
-- name: Create a subscription
- azure_rm_servicebustopicsubscription:
- name: sbsub
- resource_group: myResourceGroup
- namespace: bar
- topic: subtopic
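-
-# Additional illustrative example (names are placeholders): dead letter expired messages
-# and cap the number of delivery attempts.
-- name: Create a subscription with dead lettering
-  azure_rm_servicebustopicsubscription:
-    name: sbsub2
-    resource_group: myResourceGroup
-    namespace: bar
-    topic: subtopic
-    dead_lettering_on_message_expiration: true
-    max_delivery_count: 5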
-'''
-RETURN = '''
-id:
- description:
- - Current state of the subscription.
- returned: success
- type: str
- sample: "/subscriptions/xxx...xxx/resourceGroups/myResourceGroup/providers/Microsoft.ServiceBus/
- namespaces/nsb57dc95979/topics/topicb57dc95979/subscriptions/subsb57dc95979"
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _snake_to_camel, _camel_to_snake
-from ansible.module_utils._text import to_native
-from datetime import datetime, timedelta
-
-
-duration_spec_map = dict(
- default_message_time_to_live='default_message_time_to_live_seconds',
- duplicate_detection_history_time_window='duplicate_detection_time_in_seconds',
- auto_delete_on_idle='auto_delete_on_idle_in_seconds',
- lock_duration='lock_duration_in_seconds'
-)
-
-
-class AzureRMServiceSubscription(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- auto_delete_on_idle_in_seconds=dict(type='int'),
- dead_lettering_on_filter_evaluation_exceptions=dict(type='bool'),
- dead_lettering_on_message_expiration=dict(type='bool'),
- default_message_time_to_live_seconds=dict(type='int'),
- duplicate_detection_time_in_seconds=dict(type='int'),
- enable_batched_operations=dict(type='bool'),
- forward_dead_lettered_messages_to=dict(type='str'),
- forward_to=dict(type='str'),
- lock_duration_in_seconds=dict(type='int'),
- max_delivery_count=dict(type='int'),
- name=dict(type='str', required=True),
- namespace=dict(type='str', required=True),
- requires_session=dict(type='bool'),
- resource_group=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- status=dict(type='str',
- choices=['active', 'disabled', 'send_disabled', 'receive_disabled']),
- topic=dict(type='str', required=True)
- )
-
- self.auto_delete_on_idle_in_seconds = None
- self.dead_lettering_on_filter_evaluation_exceptions = None
- self.dead_lettering_on_message_expiration = None
- self.default_message_time_to_live_seconds = None
- self.duplicate_detection_time_in_seconds = None
- self.enable_batched_operations = None
- self.forward_dead_lettered_messages_to = None
- self.forward_to = None
- self.lock_duration_in_seconds = None
- self.max_delivery_count = None
- self.name = None
- self.namespace = None
- self.requires_session = None
- self.resource_group = None
- self.state = None
- self.status = None
- self.topic = None
-
- self.results = dict(
- changed=False,
- id=None
- )
-
- super(AzureRMServiceSubscription, self).__init__(self.module_arg_spec,
- supports_check_mode=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()):
- setattr(self, key, kwargs[key])
-
- changed = False
-
- original = self.get()
- if self.state == 'present':
- # Create the resource instance
- params = dict(
- dead_lettering_on_filter_evaluation_exceptions=self.dead_lettering_on_filter_evaluation_exceptions,
- dead_lettering_on_message_expiration=self.dead_lettering_on_message_expiration,
- enable_batched_operations=self.enable_batched_operations,
- forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to,
- forward_to=self.forward_to,
- max_delivery_count=self.max_delivery_count,
- requires_session=self.requires_session
- )
- if self.status:
- params['status'] = self.servicebus_models.EntityStatus(str.capitalize(_snake_to_camel(self.status)))
- for k, v in duration_spec_map.items():
- seconds = getattr(self, v)
- if seconds:
- params[k] = timedelta(seconds=seconds)
-
- instance = self.servicebus_models.SBSubscription(**params)
- result = original
- if not original:
- changed = True
- result = instance
- else:
- result = original
- attribute_map_keys = set(self.servicebus_models.SBSubscription._attribute_map.keys())
- validation_keys = set(self.servicebus_models.SBSubscription._validation.keys())
- attribute_map = attribute_map_keys - validation_keys
- for attribute in attribute_map:
- value = getattr(instance, attribute)
- if value and value != getattr(original, attribute):
- changed = True
- if changed and not self.check_mode:
- result = self.create_or_update(instance)
- self.results = self.to_dict(result)
- elif original:
- changed = True
- if not self.check_mode:
- self.delete()
- self.results['deleted'] = True
-
- self.results['changed'] = changed
- return self.results
-
- def create_or_update(self, param):
- try:
- client = self._get_client()
- return client.create_or_update(self.resource_group, self.namespace, self.topic, self.name, param)
- except Exception as exc:
- self.fail("Error creating or updating servicebus subscription {0} - {1}".format(self.name, str(exc)))
-
- def delete(self):
- try:
- client = self._get_client()
- client.delete(self.resource_group, self.namespace, self.topic, self.name)
- return True
- except Exception as exc:
- self.fail("Error deleting servicebus subscription {0} - {1}".format(self.name, str(exc)))
-
- def _get_client(self):
- return self.servicebus_client.subscriptions
-
- def get(self):
- try:
- client = self._get_client()
- return client.get(self.resource_group, self.namespace, self.topic, self.name)
- except Exception:
- return None
-
- def to_dict(self, instance):
- result = dict()
- attribute_map = self.servicebus_models.SBSubscription._attribute_map
- for attribute in attribute_map.keys():
- value = getattr(instance, attribute)
- if not value:
- continue
- if attribute_map[attribute]['type'] == 'duration':
- if is_valid_timedelta(value):
- key = duration_spec_map.get(attribute) or attribute
- result[key] = int(value.total_seconds())
- elif attribute == 'status':
- result['status'] = _camel_to_snake(value)
- elif isinstance(value, self.servicebus_models.MessageCountDetails):
- result[attribute] = value.as_dict()
- elif isinstance(value, self.servicebus_models.SBSku):
- result[attribute] = value.name.lower()
- elif isinstance(value, datetime):
- result[attribute] = str(value)
- elif isinstance(value, str):
- result[attribute] = to_native(value)
- elif attribute == 'max_size_in_megabytes':
- result['max_size_in_mb'] = value
- else:
- result[attribute] = value
- return result
-
-
-def is_valid_timedelta(value):
- if value == timedelta(10675199, 10085, 477581):
- return None
- return value
-
-
-def main():
- AzureRMServiceSubscription()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_snapshot.py b/lib/ansible/modules/cloud/azure/azure_rm_snapshot.py
deleted file mode 100644
index 759fd2d69b..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_snapshot.py
+++ /dev/null
@@ -1,391 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_snapshot
-version_added: '2.9'
-short_description: Manage Azure Snapshot instance
-description:
- - Create, update and delete instance of Azure Snapshot.
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: true
- type: str
- name:
- description:
- - Resource name.
- type: str
- location:
- description:
- - Resource location.
- type: str
- sku:
- description:
-            - The snapshot's SKU.
- type: dict
- suboptions:
- name:
- description:
- - The sku name.
- type: str
- choices:
- - Standard_LRS
- - Premium_LRS
- - Standard_ZRS
- tier:
- description:
- - The sku tier.
- type: str
- os_type:
- description:
- - The Operating System type.
- type: str
- choices:
- - Linux
- - Windows
- creation_data:
- description:
- - Disk source information.
- - CreationData information cannot be changed after the disk has been created.
- type: dict
- suboptions:
- create_option:
- description:
- - This enumerates the possible sources of a disk's creation.
- type: str
- choices:
- - Import
- - Copy
- source_uri:
- description:
-                    - If I(create_option=Import), this is the URI of a blob to be imported into a managed disk.
- type: str
- source_id:
- description:
-                    - If I(create_option=Copy), this is the resource ID of a managed disk to be copied from.
- type: str
- state:
- description:
- - Assert the state of the Snapshot.
- - Use C(present) to create or update an Snapshot and C(absent) to delete it.
- default: present
- type: str
- choices:
- - absent
- - present
-extends_documentation_fragment:
- - azure
- - azure_tags
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
-- name: Create a snapshot by importing an unmanaged blob from the same subscription.
- azure_rm_snapshot:
- resource_group: myResourceGroup
- name: mySnapshot
- location: eastus
- creation_data:
- create_option: Import
- source_uri: 'https://mystorageaccount.blob.core.windows.net/osimages/osimage.vhd'
-
-- name: Create a snapshot by copying an existing managed disk.
- azure_rm_snapshot:
- resource_group: myResourceGroup
- name: mySnapshot
- location: eastus
- creation_data:
- create_option: Copy
- source_id: '/subscriptions/sub123/resourceGroups/group123/providers/Microsoft.Compute/disks/disk123'
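-
-# Additional illustrative example: remove an existing snapshot.
-- name: Delete a snapshot
-  azure_rm_snapshot:
-    resource_group: myResourceGroup
-    name: mySnapshot
-    state: absent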
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/snapshots/mySnapshot
-'''
-
-import time
-import json
-from ansible.module_utils.azure_rm_common_ext import AzureRMModuleBaseExt
-from ansible.module_utils.azure_rm_common_rest import GenericRestClient
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # this is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMSnapshots(AzureRMModuleBaseExt):
- def __init__(self):
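-        # This module talks to the ARM REST API through GenericRestClient; each option's
-        # 'disposition' tells inflate_parameters() where the value lands in the nested
-        # request body (for example '/properties/osType'), and updatable=False marks
-        # values the comparison logic treats as fixed after creation.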
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- updatable=False,
- disposition='resourceGroupName',
- required=True
- ),
- name=dict(
- type='str',
- updatable=False,
- disposition='snapshotName',
- required=True
- ),
- location=dict(
- type='str',
- updatable=False,
- disposition='/'
- ),
- sku=dict(
- type='dict',
- disposition='/',
- options=dict(
- name=dict(
- type='str',
- choices=['Standard_LRS',
- 'Premium_LRS',
- 'Standard_ZRS']
- ),
- tier=dict(
- type='str'
- )
- )
- ),
- os_type=dict(
- type='str',
- disposition='/properties/osType',
- choices=['Windows',
- 'Linux']
- ),
- creation_data=dict(
- type='dict',
- disposition='/properties/creationData',
- options=dict(
- create_option=dict(
- type='str',
- disposition='createOption',
- choices=['Import', 'Copy'],
- ),
- source_uri=dict(
- type='str',
- disposition='sourceUri',
- purgeIfNone=True
- ),
- source_id=dict(
- type='str',
- disposition='sourceResourceId',
- purgeIfNone=True
- )
- )
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.name = None
- self.id = None
- self.name = None
- self.type = None
- self.managed_by = None
-
- self.results = dict(changed=False)
- self.mgmt_client = None
- self.state = None
- self.url = None
- self.status_code = [200, 201, 202]
- self.to_do = Actions.NoAction
-
- self.body = {}
- self.query_parameters = {}
- self.query_parameters['api-version'] = '2018-09-30'
- self.header_parameters = {}
- self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
-
- super(AzureRMSnapshots, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- self.body[key] = kwargs[key]
-
- self.inflate_parameters(self.module_arg_spec, self.body, 0)
-
- old_response = None
- response = None
-
- self.mgmt_client = self.get_mgmt_svc_client(GenericRestClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- resource_group = self.get_resource_group(self.resource_group)
-
- if 'location' not in self.body:
- self.body['location'] = resource_group.location
-
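-        # Build the ARM resource URL by hand; GenericRestClient then issues plain
-        # GET/PUT/DELETE requests against it with the api-version set in __init__.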
- self.url = ('/subscriptions' +
- '/{{ subscription_id }}' +
- '/resourceGroups' +
- '/{{ resource_group }}' +
- '/providers' +
- '/Microsoft.Compute' +
- '/snapshots' +
- '/{{ snapshot_name }}')
- self.url = self.url.replace('{{ subscription_id }}', self.subscription_id)
- self.url = self.url.replace('{{ resource_group }}', self.resource_group)
- self.url = self.url.replace('{{ snapshot_name }}', self.name)
-
- old_response = self.get_resource()
-
- if not old_response:
- self.log("Snapshot instance doesn't exist")
-
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log('Snapshot instance already exists')
-
- if self.state == 'absent':
- self.to_do = Actions.Delete
- else:
- modifiers = {}
-                self.create_compare_modifiers(self.module_arg_spec, '', modifiers)
-                self.results['modifiers'] = modifiers
-                self.results['compare'] = []
- if not self.default_compare(modifiers, self.body, old_response, '', self.results):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log('Need to Create / Update the Snapshot instance')
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
- response = self.create_update_resource()
- self.results['changed'] = True
- self.log('Creation / Update done')
- elif self.to_do == Actions.Delete:
- self.log('Snapshot instance deleted')
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_resource()
-
- # make sure instance is actually deleted, for some Azure resources, instance is hanging around
- # for some time after deletion -- this should be really fixed in Azure
- while self.get_resource():
- time.sleep(20)
- else:
- self.log('Snapshot instance unchanged')
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
-
- return self.results
-
- def create_update_resource(self):
- # self.log('Creating / Updating the Snapshot instance {0}'.format(self.))
- try:
- response = self.mgmt_client.query(url=self.url,
- method='PUT',
- query_parameters=self.query_parameters,
- header_parameters=self.header_parameters,
- body=self.body,
- expected_status_codes=self.status_code,
- polling_timeout=600,
- polling_interval=30)
- except CloudError as exc:
- self.log('Error attempting to create the Snapshot instance.')
- self.fail('Error creating the Snapshot instance: {0}'.format(str(exc)))
-
- try:
- response = json.loads(response.text)
- except Exception:
- response = {'text': response.text}
-
- return response
-
- def delete_resource(self):
- # self.log('Deleting the Snapshot instance {0}'.format(self.))
- try:
- response = self.mgmt_client.query(url=self.url,
- method='DELETE',
- query_parameters=self.query_parameters,
- header_parameters=self.header_parameters,
- body=None,
- expected_status_codes=self.status_code,
- polling_timeout=600,
- polling_interval=30)
- except CloudError as e:
- self.log('Error attempting to delete the Snapshot instance.')
- self.fail('Error deleting the Snapshot instance: {0}'.format(str(e)))
-
- return True
-
- def get_resource(self):
- # self.log('Checking if the Snapshot instance {0} is present'.format(self.))
- found = False
- try:
- response = self.mgmt_client.query(url=self.url,
- method='GET',
- query_parameters=self.query_parameters,
- header_parameters=self.header_parameters,
- body=None,
- expected_status_codes=self.status_code,
- polling_timeout=600,
- polling_interval=30)
- response = json.loads(response.text)
- found = True
- self.log("Response : {0}".format(response))
- # self.log("Snapshot instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the Snapshot instance.')
- if found is True:
- return response
-
- return False
-
-
-def main():
- AzureRMSnapshots()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_sqldatabase.py b/lib/ansible/modules/cloud/azure/azure_rm_sqldatabase.py
deleted file mode 100644
index 3dfcf1b904..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_sqldatabase.py
+++ /dev/null
@@ -1,514 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_sqldatabase
-version_added: "2.5"
-short_description: Manage SQL Database instance
-description:
- - Create, update and delete instance of SQL Database.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- server_name:
- description:
- - The name of the server.
- required: True
- name:
- description:
- - The name of the database to be operated on (updated or created).
- required: True
- location:
- description:
- - Resource location. If not set, location from the resource group will be used as default.
- collation:
- description:
- - The collation of the database. If not I(create_mode=default), this value is ignored.
- create_mode:
- description:
- - Specifies the mode of database creation.
- - C(default), regular database creation.
- - C(copy), creates a database as a copy of an existing database.
- - C(online_secondary)/C(non_readable_secondary), creates a database as a (readable or nonreadable) secondary replica of an existing database.
- - C(point_in_time_restore), Creates a database by restoring a point in time backup of an existing database.
- - C(recovery), Creates a database by restoring a geo-replicated backup.
- - C(restore), Creates a database by restoring a backup of a deleted database.
- - C(restore_long_term_retention_backup), Creates a database by restoring from a long term retention vault.
- - C(copy), C(non_readable_secondary), C(online_secondary) and C(restore_long_term_retention_backup) are not supported for C(data_warehouse) edition.
- choices:
- - 'copy'
- - 'default'
- - 'non_readable_secondary'
- - 'online_secondary'
- - 'point_in_time_restore'
- - 'recovery'
- - 'restore'
- - 'restore_long_term_retention_backup'
- source_database_id:
- description:
- - Required unless I(create_mode=default) or I(create_mode=restore_long_term_retention_backup).
- - Specifies the resource ID of the source database.
- source_database_deletion_date:
- description:
-            - Required if I(create_mode=restore) and I(source_database_id) is the deleted database's original resource ID when it
-              existed (as opposed to its current restorable dropped database ID).
-            - Specifies the time that the database was deleted.
- restore_point_in_time:
- description:
-            - Required if I(create_mode=point_in_time_restore). Optional if I(create_mode=restore).
- - Specifies the point in time (ISO8601 format) of the source database that will be restored to create the new database.
- - Must be greater than or equal to the source database's earliestRestoreDate value.
- recovery_services_recovery_point_resource_id:
- description:
-            - Required if I(create_mode=restore_long_term_retention_backup).
- - Specifies the resource ID of the recovery point to restore from.
- edition:
- description:
- - The edition of the database. The DatabaseEditions enumeration contains all the valid editions.
- - If I(create_mode=non_readable_secondary) or I(create_mode=online_secondary), this value is ignored.
- - To see possible values, query the capabilities API (/subscriptions/{subscriptionId}/providers/Microsoft.Sql/locations/{locationID}/capabilities)
- referred to by operationId:'Capabilities_ListByLocation'.
- choices:
- - 'web'
- - 'business'
- - 'basic'
- - 'standard'
- - 'premium'
- - 'free'
- - 'stretch'
- - 'data_warehouse'
- - 'system'
- - 'system2'
- max_size_bytes:
- description:
- - The max size of the database expressed in bytes.
- - If not I(create_mode=default), this value is ignored.
-            - To see possible values, query the capabilities API (/subscriptions/{subscriptionId}/providers/Microsoft.Sql/locations/{locationID}/capabilities)
-              referred to by operationId:'Capabilities_ListByLocation'.
- elastic_pool_name:
- description:
- - The name of the elastic pool the database is in. Not supported for I(edition=data_warehouse).
- read_scale:
- description:
- - If the database is a geo-secondary, indicates whether read-only connections are allowed to this database or not.
- - Not supported for I(edition=data_warehouse).
- type: bool
- default: False
- sample_name:
- description:
- - Indicates the name of the sample schema to apply when creating this database.
- - If not I(create_mode=default), this value is ignored.
- - Not supported for I(edition=data_warehouse).
- choices:
- - 'adventure_works_lt'
- zone_redundant:
- description:
-            - Whether this database is zone redundant, which means the replicas of this database will be spread across multiple availability zones.
- type: bool
- default: False
- force_update:
- description:
- - SQL Database will be updated if given parameters differ from existing resource state.
-            - To force SQL Database update in any circumstances, set this parameter to C(True).
- type: bool
- state:
- description:
- - Assert the state of the SQL Database. Use C(present) to create or update an SQL Database and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Create (or update) SQL Database
- azure_rm_sqldatabase:
- resource_group: myResourceGroup
- server_name: sqlcrudtest-5961
- name: testdb
- location: eastus
-
- - name: Restore SQL Database
- azure_rm_sqldatabase:
- resource_group: myResourceGroup
- server_name: sqlcrudtest-5961
- name: restoreddb
- location: eastus
- create_mode: restore
- restorable_dropped_database_id: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/s
- ervers/testsvr/restorableDroppedDatabases/testdb2,131444841315030000"
-
- - name: Create SQL Database in Copy Mode
- azure_rm_sqldatabase:
- resource_group: myResourceGroup
- server_name: sqlcrudtest-5961
- name: copydb
- location: eastus
- create_mode: copy
- source_database_id: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/servers/tests
- vr/databases/testdb"
-
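-  # Additional illustrative example: remove the database again once it is no longer needed.
-  - name: Delete SQL Database
-    azure_rm_sqldatabase:
-      resource_group: myResourceGroup
-      server_name: sqlcrudtest-5961
-      name: testdb
-      state: absent
-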
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/servers/sqlcrudtest-5961/databases/t
- estdb"
-database_id:
- description:
- - The ID of the database.
- returned: always
- type: str
- sample: database_id
-status:
- description:
- - The status of the database.
- returned: always
- type: str
- sample: Online
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from azure.mgmt.sql import SqlManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMSqlDatabase(AzureRMModuleBase):
- """Configuration class for an Azure RM SQL Database resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- collation=dict(
- type='str'
- ),
- create_mode=dict(
- type='str',
- choices=['copy',
- 'default',
- 'non_readable_secondary',
- 'online_secondary',
- 'point_in_time_restore',
- 'recovery',
- 'restore',
- 'restore_long_term_retention_backup']
- ),
- source_database_id=dict(
- type='str'
- ),
- source_database_deletion_date=dict(
- type='datetime'
- ),
- restore_point_in_time=dict(
- type='datetime'
- ),
- recovery_services_recovery_point_resource_id=dict(
- type='str'
- ),
- edition=dict(
- type='str',
- choices=['web',
- 'business',
- 'basic',
- 'standard',
- 'premium',
- 'free',
- 'stretch',
- 'data_warehouse',
- 'system',
- 'system2']
- ),
- max_size_bytes=dict(
- type='str'
- ),
- elastic_pool_name=dict(
- type='str'
- ),
- read_scale=dict(
- type='bool',
- default=False
- ),
- sample_name=dict(
- type='str',
- choices=['adventure_works_lt']
- ),
- zone_redundant=dict(
- type='bool',
- default=False
- ),
- force_update=dict(
- type='bool'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.server_name = None
- self.name = None
- self.parameters = dict()
- self.tags = None
-
- self.results = dict(changed=False)
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMSqlDatabase, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- if key == "location":
- self.parameters["location"] = kwargs[key]
- elif key == "collation":
- self.parameters["collation"] = kwargs[key]
- elif key == "create_mode":
- self.parameters["create_mode"] = _snake_to_camel(kwargs[key], True)
- elif key == "source_database_id":
- self.parameters["source_database_id"] = kwargs[key]
- elif key == "source_database_deletion_date":
- self.parameters["source_database_deletion_date"] = kwargs[key]
- elif key == "restore_point_in_time":
- self.parameters["restore_point_in_time"] = kwargs[key]
- elif key == "recovery_services_recovery_point_resource_id":
- self.parameters["recovery_services_recovery_point_resource_id"] = kwargs[key]
- elif key == "edition":
- self.parameters["edition"] = _snake_to_camel(kwargs[key], True)
- elif key == "max_size_bytes":
- self.parameters["max_size_bytes"] = kwargs[key]
- elif key == "elastic_pool_name":
- self.parameters["elastic_pool_id"] = kwargs[key]
- elif key == "read_scale":
- self.parameters["read_scale"] = 'Enabled' if kwargs[key] else 'Disabled'
- elif key == "sample_name":
- ev = kwargs[key]
- if ev == 'adventure_works_lt':
- ev = 'AdventureWorksLT'
- self.parameters["sample_name"] = ev
- elif key == "zone_redundant":
- self.parameters["zone_redundant"] = True if kwargs[key] else False
-
- old_response = None
- response = None
-
- resource_group = self.get_resource_group(self.resource_group)
-
- if "location" not in self.parameters:
- self.parameters["location"] = resource_group.location
-
- if "elastic_pool_id" in self.parameters:
- self.format_elastic_pool_id()
-
- old_response = self.get_sqldatabase()
-
- if not old_response:
- self.log("SQL Database instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("SQL Database instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.log("Need to check if SQL Database instance has to be deleted or may be updated")
- if ('location' in self.parameters) and (self.parameters['location'] != old_response['location']):
- self.to_do = Actions.Update
- if (('read_scale' in self.parameters) and
- (self.parameters['read_scale'] != old_response['read_scale'])):
- self.to_do = Actions.Update
- if (('max_size_bytes' in self.parameters) and
- (self.parameters['max_size_bytes'] != old_response['max_size_bytes'])):
- self.to_do = Actions.Update
- if (('edition' in self.parameters) and
- (self.parameters['edition'] != old_response['edition'])):
- self.to_do = Actions.Update
- update_tags, newtags = self.update_tags(old_response.get('tags', dict()))
- if update_tags:
- self.tags = newtags
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the SQL Database instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- self.parameters['tags'] = self.tags
- response = self.create_update_sqldatabase()
-
- if not old_response:
- self.results['changed'] = True
- else:
- self.results['changed'] = old_response.__ne__(response)
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("SQL Database instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_sqldatabase()
- # make sure instance is actually deleted, for some Azure resources, instance is hanging around
- # for some time after deletion -- this should be really fixed in Azure
- while self.get_sqldatabase():
- time.sleep(20)
- else:
- self.log("SQL Database instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
- self.results["database_id"] = response["database_id"]
- self.results["status"] = response["status"]
-
- return self.results
-
- def create_update_sqldatabase(self):
- '''
- Creates or updates SQL Database with the specified configuration.
-
- :return: deserialized SQL Database instance state dictionary
- '''
- self.log("Creating / Updating the SQL Database instance {0}".format(self.name))
-
- try:
- response = self.sql_client.databases.create_or_update(resource_group_name=self.resource_group,
- server_name=self.server_name,
- database_name=self.name,
- parameters=self.parameters)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the SQL Database instance.')
- self.fail("Error creating the SQL Database instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_sqldatabase(self):
- '''
- Deletes specified SQL Database instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the SQL Database instance {0}".format(self.name))
- try:
- response = self.sql_client.databases.delete(resource_group_name=self.resource_group,
- server_name=self.server_name,
- database_name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the SQL Database instance.')
- self.fail("Error deleting the SQL Database instance: {0}".format(str(e)))
-
- return True
-
- def get_sqldatabase(self):
- '''
- Gets the properties of the specified SQL Database.
-
- :return: deserialized SQL Database instance state dictionary
- '''
- self.log("Checking if the SQL Database instance {0} is present".format(self.name))
- found = False
- try:
- response = self.sql_client.databases.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- database_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("SQL Database instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the SQL Database instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
- def format_elastic_pool_id(self):
-        parent_id = format_resource_id(val=self.server_name,
-                                       subscription_id=self.subscription_id,
-                                       namespace="Microsoft.Sql",
-                                       types="servers",
-                                       resource_group=self.resource_group)
-        self.parameters['elastic_pool_id'] = parent_id + "/elasticPools/" + self.parameters['elastic_pool_id']
-
-
-def _snake_to_camel(snake, capitalize_first=False):
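-    # Local helper mirroring the dict_transformations utility, e.g.
-    # 'point_in_time_restore' -> 'PointInTimeRestore' when capitalize_first is True,
-    # otherwise 'point_in_time_restore' -> 'pointInTimeRestore'.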
- if capitalize_first:
- return ''.join(x.capitalize() or '_' for x in snake.split('_'))
- else:
- return snake.split('_')[0] + ''.join(x.capitalize() or '_' for x in snake.split('_')[1:])
-
-
-def main():
- """Main execution"""
- AzureRMSqlDatabase()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_sqldatabase_info.py b/lib/ansible/modules/cloud/azure/azure_rm_sqldatabase_info.py
deleted file mode 100644
index e0535b87d3..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_sqldatabase_info.py
+++ /dev/null
@@ -1,288 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_sqldatabase_info
-version_added: "2.8"
-short_description: Get Azure SQL Database facts
-description:
- - Get facts of Azure SQL Database.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- server_name:
- description:
- - The name of the server.
- required: True
- name:
- description:
- - The name of the database.
- elastic_pool_name:
- description:
- - The name of the elastic pool.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of SQL Database
- azure_rm_sqldatabase_info:
- resource_group: testrg
- server_name: testserver
- name: testdb
-
- - name: List instances of SQL Database in an elastic pool
- azure_rm_sqldatabase_info:
- resource_group: testrg
- server_name: testserver
- elastic_pool_name: testep
-
- - name: List instances of SQL Database
- azure_rm_sqldatabase_info:
- resource_group: testrg
- server_name: testserver
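-
- # Illustrative sketch, not part of the original examples: per the tags option above,
- # results can be limited to databases carrying matching tags ('key' or 'key:value').
- - name: List instances of SQL Database filtered by tags
- azure_rm_sqldatabase_info:
- resource_group: testrg
- server_name: testserver
- tags:
- - taga:aaa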
-'''
-
-RETURN = '''
-databases:
- description:
- - A list of dictionaries containing facts for SQL Database.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/testrg/providers/Microsoft.Sql/servers/testserver/databases/testdb
- name:
- description:
- - Database name.
- returned: always
- type: str
- sample: testdb
- location:
- description:
- - Resource location.
- returned: always
- type: str
- sample: southeastasia
- tags:
- description:
- - Resource tags.
- returned: always
- type: dict
- sample: { 'taga':'aaa', 'tagb':'bbb' }
- sku:
- description:
- - The name and tier of the SKU.
- returned: always
- type: complex
- contains:
- name:
- description:
- - The name of the SKU.
- returned: always
- type: str
- sample: BC_Gen4_2
- tier:
- description:
- - The SKU tier.
- returned: always
- type: str
- sample: BusinessCritical
- capacity:
- description:
- - The SKU capacity.
- returned: always
- type: int
- sample: 2
- kind:
- description:
- - Kind of database. This is metadata used for the Azure portal experience.
- returned: always
- type: str
- sample: v12.0,user
- collation:
- description:
- - The collation of the database.
- returned: always
- type: str
- sample: SQL_Latin1_General_CP1_CI_AS
- status:
- description:
- - The status of the database.
- returned: always
- type: str
- sample: Online
- zone_redundant:
- description:
- - Whether or not this database is zone redundant, which means the replicas of this database will be spread across multiple availability zones.
- returned: always
- type: bool
- sample: true
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.sql import SqlManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMSqlDatabaseInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- ),
- elastic_pool_name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.resource_group = None
- self.server_name = None
- self.name = None
- self.elastic_pool_name = None
- self.tags = None
- super(AzureRMSqlDatabaseInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_sqldatabase_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_sqldatabase_facts' module has been renamed to 'azure_rm_sqldatabase_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name is not None:
- self.results['databases'] = self.get()
- elif self.elastic_pool_name is not None:
- self.results['databases'] = self.list_by_elastic_pool()
- else:
- self.results['databases'] = self.list_by_server()
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.sql_client.databases.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- database_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Databases.')
-
- if response and self.has_tags(response.tags, self.tags):
- results.append(self.format_item(response))
-
- return results
-
- def list_by_elastic_pool(self):
- response = None
- results = []
- try:
- response = self.sql_client.databases.list_by_elastic_pool(resource_group_name=self.resource_group,
- server_name=self.server_name,
- elastic_pool_name=self.elastic_pool_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail('Could not get facts for Databases.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_item(item))
-
- return results
-
- def list_by_server(self):
- response = None
- results = []
- try:
- response = self.sql_client.databases.list_by_server(resource_group_name=self.resource_group,
- server_name=self.server_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.fail('Could not get facts for Databases.')
-
- if response is not None:
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_item(item))
-
- return results
-
- def format_item(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.resource_group,
- 'id': d.get('id', None),
- 'name': d.get('name', None),
- 'location': d.get('location', None),
- 'tags': d.get('tags', None),
- 'sku': {
- 'name': d.get('sku', {}).get('name', None),
- 'tier': d.get('sku', {}).get('tier', None),
- 'capacity': d.get('sku', {}).get('capacity', None)
- },
- 'kind': d.get('kind', None),
- 'collation': d.get('collation', None),
- 'status': d.get('status', None),
- 'zone_redundant': d.get('zone_redundant', None)
- }
- return d
-
-
-def main():
- AzureRMSqlDatabaseInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule.py b/lib/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule.py
deleted file mode 100644
index f09330a150..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule.py
+++ /dev/null
@@ -1,273 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_sqlfirewallrule
-version_added: "2.7"
-short_description: Manage Firewall Rule instance
-description:
- - Create, update and delete instance of Firewall Rule.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- server_name:
- description:
- - The name of the server.
- required: True
- name:
- description:
- - The name of the firewall rule.
- required: True
- start_ip_address:
- description:
- - The start IP address of the firewall rule.
- - Must be IPv4 format. Use value C(0.0.0.0) to represent all Azure-internal IP addresses.
- end_ip_address:
- description:
- - The end IP address of the firewall rule.
- - Must be IPv4 format. Must be greater than or equal to I(start_ip_address). Use value C(0.0.0.0) to represent all Azure-internal IP addresses.
- state:
- description:
- - State of the firewall rule. Use C(present) to create or update a firewall rule and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Create (or update) Firewall Rule
- azure_rm_sqlfirewallrule:
- resource_group: myResourceGroup
- server_name: firewallrulecrudtest-6285
- name: firewallrulecrudtest-5370
- start_ip_address: 172.28.10.136
- end_ip_address: 172.28.10.138
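-
- # Illustrative sketch, not part of the original examples: per the options above,
- # C(0.0.0.0) represents all Azure-internal IP addresses and I(state=absent) deletes the rule.
- - name: Allow Azure-internal IP addresses only
- azure_rm_sqlfirewallrule:
- resource_group: myResourceGroup
- server_name: firewallrulecrudtest-6285
- name: firewallrulecrudtest-5370
- start_ip_address: 0.0.0.0
- end_ip_address: 0.0.0.0
-
- - name: Delete Firewall Rule
- azure_rm_sqlfirewallrule:
- resource_group: myResourceGroup
- server_name: firewallrulecrudtest-6285
- name: firewallrulecrudtest-5370
- state: absent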
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/servers/firewallrulecrudtest-628
- 5/firewallRules/firewallrulecrudtest-5370"
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from azure.mgmt.sql import SqlManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMSqlFirewallRule(AzureRMModuleBase):
- """Configuration class for an Azure RM Firewall Rule resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- start_ip_address=dict(
- type='str'
- ),
- end_ip_address=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.server_name = None
- self.name = None
- self.start_ip_address = None
- self.end_ip_address = None
-
- self.results = dict(changed=False)
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMSqlFirewallRule, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()):
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
-
- old_response = self.get_firewallrule()
- response = None
-
- if not old_response:
- self.log("Firewall Rule instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("Firewall Rule instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.log("Need to check if Firewall Rule instance has to be deleted or may be updated")
- if (self.start_ip_address is not None) and (self.start_ip_address != old_response['start_ip_address']):
- self.to_do = Actions.Update
- if (self.end_ip_address is not None) and (self.end_ip_address != old_response['end_ip_address']):
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the Firewall Rule instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- response = self.create_update_firewallrule()
-
- if not old_response:
- self.results['changed'] = True
- else:
- self.results['changed'] = (old_response != response)
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("Firewall Rule instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_firewallrule()
- # make sure the instance is actually deleted; some Azure resources linger for a while
- # after deletion -- this should really be fixed in Azure
- while self.get_firewallrule():
- time.sleep(20)
- else:
- self.log("Firewall Rule instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
-
- return self.results
-
- def create_update_firewallrule(self):
- '''
- Creates or updates Firewall Rule with the specified configuration.
-
- :return: deserialized Firewall Rule instance state dictionary
- '''
- self.log("Creating / Updating the Firewall Rule instance {0}".format(self.name))
-
- try:
- response = self.sql_client.firewall_rules.create_or_update(resource_group_name=self.resource_group,
- server_name=self.server_name,
- firewall_rule_name=self.name,
- start_ip_address=self.start_ip_address,
- end_ip_address=self.end_ip_address)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the Firewall Rule instance.')
- self.fail("Error creating the Firewall Rule instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_firewallrule(self):
- '''
- Deletes specified Firewall Rule instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the Firewall Rule instance {0}".format(self.name))
- try:
- response = self.sql_client.firewall_rules.delete(resource_group_name=self.resource_group,
- server_name=self.server_name,
- firewall_rule_name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the Firewall Rule instance.')
- self.fail("Error deleting the Firewall Rule instance: {0}".format(str(e)))
-
- return True
-
- def get_firewallrule(self):
- '''
- Gets the properties of the specified Firewall Rule.
-
- :return: deserialized Firewall Rule instance state dictionary
- '''
- self.log("Checking if the Firewall Rule instance {0} is present".format(self.name))
- found = False
- try:
- response = self.sql_client.firewall_rules.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- firewall_rule_name=self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("Firewall Rule instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the Firewall Rule instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMSqlFirewallRule()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule_info.py b/lib/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule_info.py
deleted file mode 100644
index 88858205c0..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule_info.py
+++ /dev/null
@@ -1,215 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_sqlfirewallrule_info
-version_added: "2.8"
-short_description: Get Azure SQL Firewall Rule facts
-description:
- - Get facts of SQL Firewall Rule.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the server.
- required: True
- server_name:
- description:
- - The name of the server.
- required: True
- name:
- description:
- - The name of the firewall rule.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of SQL Firewall Rule
- azure_rm_sqlfirewallrule_info:
- resource_group: myResourceGroup
- server_name: testserver
- name: testrule
-
- - name: List instances of SQL Firewall Rule
- azure_rm_sqlfirewallrule_info:
- resource_group: myResourceGroup
- server_name: testserver
-'''
-
-RETURN = '''
-rules:
- description:
- - A list of dict results containing the facts for matching SQL firewall rules.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/servers/testser
- ver/firewallRules/testrule"
- resource_group:
- description:
- - Resource group name.
- returned: always
- type: str
- sample: testgroup
- server_name:
- description:
- - SQL server name.
- returned: always
- type: str
- sample: testserver
- name:
- description:
- - Firewall rule name.
- returned: always
- type: str
- sample: testrule
- start_ip_address:
- description:
- - The start IP address of the firewall rule.
- returned: always
- type: str
- sample: 10.0.0.1
- end_ip_address:
- description:
- - The end IP address of the firewall rule.
- returned: always
- type: str
- sample: 10.0.0.5
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from azure.mgmt.sql import SqlManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMSqlFirewallRuleInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.resource_group = None
- self.server_name = None
- self.name = None
- super(AzureRMSqlFirewallRuleInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_sqlfirewallrule_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_sqlfirewallrule_facts' module has been renamed to 'azure_rm_sqlfirewallrule_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if (self.name is not None):
- self.results['rules'] = self.get()
- else:
- self.results['rules'] = self.list_by_server()
- return self.results
-
- def get(self):
- '''
- Gets facts of the specified SQL Firewall Rule.
-
- :return: deserialized SQL Firewall Rule instance state dictionary
- '''
- response = None
- results = []
- try:
- response = self.sql_client.firewall_rules.get(resource_group_name=self.resource_group,
- server_name=self.server_name,
- firewall_rule_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for FirewallRules.')
-
- if response is not None:
- results.append(self.format_item(response))
-
- return results
-
- def list_by_server(self):
- '''
- Gets facts of the specified SQL Firewall Rule.
-
- :return: deserialized SQL Firewall Rule instance state dictionary
- '''
- response = None
- results = []
- try:
- response = self.sql_client.firewall_rules.list_by_server(resource_group_name=self.resource_group,
- server_name=self.server_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for FirewallRules.')
-
- if response is not None:
- for item in response:
- results.append(self.format_item(item))
-
- return results
-
- def format_item(self, item):
- d = item.as_dict()
- d = {
- 'id': d['id'],
- 'resource_group': self.resource_group,
- 'server_name': self.server_name,
- 'name': d['name'],
- 'start_ip_address': d['start_ip_address'],
- 'end_ip_address': d['end_ip_address']
- }
- return d
-
-
-def main():
- AzureRMSqlFirewallRuleInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_sqlserver.py b/lib/ansible/modules/cloud/azure/azure_rm_sqlserver.py
deleted file mode 100644
index 1f5f64c4b3..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_sqlserver.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_sqlserver
-version_added: "2.5"
-short_description: Manage SQL Server instance
-description:
- - Create, update and delete instance of SQL Server.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- name:
- description:
- - The name of the server.
- required: True
- location:
- description:
- - Resource location.
- admin_username:
- description:
- - Administrator username for the server. Once created it cannot be changed.
- admin_password:
- description:
- - The administrator login password (required for server creation).
- version:
- description:
- - The version of the server. For example C(12.0).
- identity:
- description:
- - The identity type. Set this to C(SystemAssigned) in order to automatically create and assign an Azure Active Directory principal for the resource.
- - Possible values include C(SystemAssigned).
- state:
- description:
- - State of the SQL server. Use C(present) to create or update a server and use C(absent) to delete a server.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Create (or update) SQL Server
- azure_rm_sqlserver:
- resource_group: myResourceGroup
- name: server_name
- location: westus
- admin_username: mylogin
- admin_password: Testpasswordxyz12!
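-
- # Illustrative sketch, not part of the original examples: per the identity and state
- # options above, a server can be created with a system-assigned identity or removed
- # with I(state=absent).
- - name: Create (or update) SQL Server with a system-assigned identity
- azure_rm_sqlserver:
- resource_group: myResourceGroup
- name: server_name
- location: westus
- admin_username: mylogin
- admin_password: Testpasswordxyz12!
- identity: SystemAssigned
-
- - name: Delete SQL Server
- azure_rm_sqlserver:
- resource_group: myResourceGroup
- name: server_name
- state: absent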
-'''
-
-RETURN = '''
-id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/servers/sqlcrudtest-4645
-version:
- description:
- - The version of the server.
- returned: always
- type: str
- sample: 12.0
-state:
- description:
- - The state of the server.
- returned: always
- type: str
- sample: state
-fully_qualified_domain_name:
- description:
- - The fully qualified domain name of the server.
- returned: always
- type: str
- sample: sqlcrudtest-4645.database.windows.net
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from azure.mgmt.sql import SqlManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class Actions:
- NoAction, Create, Update, Delete = range(4)
-
-
-class AzureRMSqlServer(AzureRMModuleBase):
- """Configuration class for an Azure RM SQL Server resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- admin_username=dict(
- type='str'
- ),
- admin_password=dict(
- type='str',
- no_log=True
- ),
- version=dict(
- type='str'
- ),
- identity=dict(
- type='str'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.name = None
- self.parameters = dict()
- self.tags = None
-
- self.results = dict(changed=False)
- self.state = None
- self.to_do = Actions.NoAction
-
- super(AzureRMSqlServer, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- if key == "location":
- self.parameters.update({"location": kwargs[key]})
- elif key == "admin_username":
- self.parameters.update({"administrator_login": kwargs[key]})
- elif key == "admin_password":
- self.parameters.update({"administrator_login_password": kwargs[key]})
- elif key == "version":
- self.parameters.update({"version": kwargs[key]})
- elif key == "identity":
- self.parameters.update({"identity": {"type": kwargs[key]}})
-
- old_response = None
- response = None
- results = dict()
-
- resource_group = self.get_resource_group(self.resource_group)
-
- if "location" not in self.parameters:
- self.parameters["location"] = resource_group.location
-
- old_response = self.get_sqlserver()
-
- if not old_response:
- self.log("SQL Server instance doesn't exist")
- if self.state == 'absent':
- self.log("Old instance didn't exist")
- else:
- self.to_do = Actions.Create
- else:
- self.log("SQL Server instance already exists")
- if self.state == 'absent':
- self.to_do = Actions.Delete
- elif self.state == 'present':
- self.log("Need to check if SQL Server instance has to be deleted or may be updated")
- update_tags, newtags = self.update_tags(old_response.get('tags', dict()))
- if update_tags:
- self.tags = newtags
- self.to_do = Actions.Update
-
- if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
- self.log("Need to Create / Update the SQL Server instance")
-
- if self.check_mode:
- self.results['changed'] = True
- return self.results
-
- self.parameters['tags'] = self.tags
- response = self.create_update_sqlserver()
- response.pop('administrator_login_password', None)
-
- if not old_response:
- self.results['changed'] = True
- else:
- self.results['changed'] = (old_response != response)
- self.log("Creation / Update done")
- elif self.to_do == Actions.Delete:
- self.log("SQL Server instance deleted")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_sqlserver()
- # make sure the instance is actually deleted; some Azure resources linger for a while
- # after deletion -- this should really be fixed in Azure
- while self.get_sqlserver():
- time.sleep(20)
- else:
- self.log("SQL Server instance unchanged")
- self.results['changed'] = False
- response = old_response
-
- if response:
- self.results["id"] = response["id"]
- self.results["version"] = response["version"]
- self.results["state"] = response["state"]
- self.results["fully_qualified_domain_name"] = response["fully_qualified_domain_name"]
-
- return self.results
-
- def create_update_sqlserver(self):
- '''
- Creates or updates SQL Server with the specified configuration.
-
- :return: deserialized SQL Server instance state dictionary
- '''
- self.log("Creating / Updating the SQL Server instance {0}".format(self.name))
-
- try:
- response = self.sql_client.servers.create_or_update(self.resource_group,
- self.name,
- self.parameters)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the SQL Server instance.')
- self.fail("Error creating the SQL Server instance: {0}".format(str(exc)))
- return response.as_dict()
-
- def delete_sqlserver(self):
- '''
- Deletes specified SQL Server instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the SQL Server instance {0}".format(self.name))
- try:
- response = self.sql_client.servers.delete(self.resource_group,
- self.name)
- except CloudError as e:
- self.log('Error attempting to delete the SQL Server instance.')
- self.fail("Error deleting the SQL Server instance: {0}".format(str(e)))
-
- return True
-
- def get_sqlserver(self):
- '''
- Gets the properties of the specified SQL Server.
-
- :return: deserialized SQL Server instance state dictionary
- '''
- self.log("Checking if the SQL Server instance {0} is present".format(self.name))
- found = False
- try:
- response = self.sql_client.servers.get(self.resource_group,
- self.name)
- found = True
- self.log("Response : {0}".format(response))
- self.log("SQL Server instance : {0} found".format(response.name))
- except CloudError as e:
- self.log('Did not find the SQL Server instance.')
- if found is True:
- return response.as_dict()
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMSqlServer()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_sqlserver_info.py b/lib/ansible/modules/cloud/azure/azure_rm_sqlserver_info.py
deleted file mode 100644
index 2a90dd1929..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_sqlserver_info.py
+++ /dev/null
@@ -1,207 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_sqlserver_info
-version_added: "2.9"
-short_description: Get SQL Server facts
-description:
- - Get facts of SQL Server.
-
-options:
- resource_group:
- description:
- - The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
- required: True
- server_name:
- description:
- - The name of the server.
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get instance of SQL Server
- azure_rm_sqlserver_info:
- resource_group: myResourceGroup
- server_name: server_name
-
- - name: List instances of SQL Server
- azure_rm_sqlserver_info:
- resource_group: myResourceGroup
-'''
-
-RETURN = '''
-servers:
- description:
- - A list of dict results where the key is the name of the SQL Server and the values are the facts for that SQL Server.
- returned: always
- type: complex
- contains:
- sqlserver_name:
- description:
- - The key is the name of the server that the values relate to.
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Sql/servers/sqlcrudtest-4645
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: sqlcrudtest-4645
- type:
- description:
- - Resource type.
- returned: always
- type: str
- sample: Microsoft.Sql/servers
- location:
- description:
- - Resource location.
- returned: always
- type: str
- sample: japaneast
- kind:
- description:
- - Kind of SQL server. This is metadata used for the Azure portal experience.
- returned: always
- type: str
- sample: v12.0
- version:
- description:
- - The version of the server.
- returned: always
- type: str
- sample: 12.0
- state:
- description:
- - The state of the server.
- returned: always
- type: str
- sample: Ready
- fully_qualified_domain_name:
- description:
- - The fully qualified domain name of the server.
- returned: always
- type: str
- sample: fully_qualified_domain_name
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.sql import SqlManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMSqlServerInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- server_name=dict(
- type='str'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False,
- )
- self.resource_group = None
- self.server_name = None
- super(AzureRMSqlServerInfo, self).__init__(self.module_arg_spec)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_sqlserver_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_sqlserver_facts' module has been renamed to 'azure_rm_sqlserver_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if (self.resource_group is not None and
- self.server_name is not None):
- self.results['servers'] = self.get()
- elif (self.resource_group is not None):
- self.results['servers'] = self.list_by_resource_group()
- return self.results
-
- def get(self):
- '''
- Gets facts of the specified SQL Server.
-
- :return: deserialized SQL Server instance state dictionary
- '''
- response = None
- results = {}
- try:
- response = self.sql_client.servers.get(resource_group_name=self.resource_group,
- server_name=self.server_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Servers.')
-
- if response is not None:
- results[response.name] = response.as_dict()
-
- return results
-
- def list_by_resource_group(self):
- '''
- Gets facts of the specified SQL Server.
-
- :return: deserialized SQL Server instance state dictionary
- '''
- response = None
- results = {}
- try:
- response = self.sql_client.servers.list_by_resource_group(resource_group_name=self.resource_group)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Servers.')
-
- if response is not None:
- for item in response:
- results[item.name] = item.as_dict()
-
- return results
-
-
-def main():
- AzureRMSqlServerInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_storageaccount.py b/lib/ansible/modules/cloud/azure/azure_rm_storageaccount.py
deleted file mode 100644
index d4158bbda8..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_storageaccount.py
+++ /dev/null
@@ -1,684 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_storageaccount
-version_added: "2.1"
-short_description: Manage Azure storage accounts
-description:
- - Create, update or delete a storage account.
-options:
- resource_group:
- description:
- - Name of the resource group to use.
- required: true
- aliases:
- - resource_group_name
- name:
- description:
- - Name of the storage account to update or create.
- state:
- description:
- - State of the storage account. Use C(present) to create or update a storage account and use C(absent) to delete an account.
- default: present
- choices:
- - absent
- - present
- location:
- description:
- - Valid Azure location. Defaults to location of the resource group.
- account_type:
- description:
- - Type of storage account. Required when creating a storage account.
- - C(Standard_ZRS) and C(Premium_LRS) accounts cannot be changed to other account types.
- - Other account types cannot be changed to C(Standard_ZRS) or C(Premium_LRS).
- choices:
- - Premium_LRS
- - Standard_GRS
- - Standard_LRS
- - StandardSSD_LRS
- - Standard_RAGRS
- - Standard_ZRS
- - Premium_ZRS
- aliases:
- - type
- custom_domain:
- description:
- - User domain assigned to the storage account.
- - Must be a dictionary with I(name) and I(use_sub_domain) keys where I(name) is the CNAME source.
- - Only one custom domain is supported per storage account at this time.
- - To clear the existing custom domain, use an empty string for the custom domain name property.
- - Can be added to an existing storage account. Will be ignored during storage account creation.
- aliases:
- - custom_dns_domain_suffix
- kind:
- description:
- - The kind of storage.
- default: 'Storage'
- choices:
- - Storage
- - StorageV2
- - BlobStorage
- version_added: "2.2"
- access_tier:
- description:
- - The access tier for this storage account. Required when I(kind=BlobStorage).
- choices:
- - Hot
- - Cool
- version_added: "2.4"
- force_delete_nonempty:
- description:
- - Attempt deletion even if the storage account is not empty, for example when it still contains blob containers.
- type: bool
- aliases:
- - force
- https_only:
- description:
- - Allow only HTTPS traffic to the storage service when set to C(true).
- type: bool
- version_added: "2.8"
- blob_cors:
- description:
- - Specifies CORS rules for the Blob service.
- - You can include up to five CorsRule elements in the request.
- - If no blob_cors elements are included in the argument list, nothing about CORS will be changed.
- - If you want to delete all CORS rules and disable CORS for the Blob service, explicitly set I(blob_cors=[]).
- type: list
- version_added: "2.8"
- suboptions:
- allowed_origins:
- description:
- - A list of origin domains that will be allowed via CORS, or "*" to allow all domains.
- type: list
- required: true
- allowed_methods:
- description:
- - A list of HTTP methods that are allowed to be executed by the origin.
- type: list
- required: true
- max_age_in_seconds:
- description:
- - The number of seconds that the client/browser should cache a preflight response.
- type: int
- required: true
- exposed_headers:
- description:
- - A list of response headers to expose to CORS clients.
- type: list
- required: true
- allowed_headers:
- description:
- - A list of headers allowed to be part of the cross-origin request.
- type: list
- required: true
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Chris Houseknecht (@chouseknecht)
- - Matt Davis (@nitzmahone)
-'''
-
-EXAMPLES = '''
- - name: remove account, if it exists
- azure_rm_storageaccount:
- resource_group: myResourceGroup
- name: clh0002
- state: absent
-
- - name: create an account
- azure_rm_storageaccount:
- resource_group: myResourceGroup
- name: clh0002
- type: Standard_RAGRS
- tags:
- testing: testing
- delete: on-exit
-
- - name: create an account with blob CORS
- azure_rm_storageaccount:
- resource_group: myResourceGroup
- name: clh002
- type: Standard_RAGRS
- blob_cors:
- - allowed_origins:
- - http://www.example.com/
- allowed_methods:
- - GET
- - POST
- allowed_headers:
- - x-ms-meta-data*
- - x-ms-meta-target*
- - x-ms-meta-abc
- exposed_headers:
- - x-ms-meta-*
- max_age_in_seconds: 200
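-
- # Illustrative sketch, not part of the original examples: per the blob_cors option
- # above, an explicit empty list removes all CORS rules and disables CORS for the
- # Blob service.
- - name: clear all blob CORS rules on an account
- azure_rm_storageaccount:
- resource_group: myResourceGroup
- name: clh002
- blob_cors: []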
-'''
-
-
-RETURN = '''
-state:
- description:
- - Current state of the storage account.
- returned: always
- type: complex
- contains:
- account_type:
- description:
- - Type of storage account.
- returned: always
- type: str
- sample: Standard_RAGRS
- custom_domain:
- description:
- - User domain assigned to the storage account.
- returned: always
- type: complex
- contains:
- name:
- description:
- - CNAME source.
- returned: always
- type: str
- sample: testaccount
- use_sub_domain:
- description:
- - Whether to use sub domain.
- returned: always
- type: bool
- sample: true
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/clh0003"
- location:
- description:
- - Valid Azure location. Defaults to location of the resource group.
- returned: always
- type: str
- sample: eastus2
- name:
- description:
- - Name of the storage account to update or create.
- returned: always
- type: str
- sample: clh0003
- primary_endpoints:
- description:
- - The URLs to retrieve the public I(blob), I(queue), or I(table) object from the primary location.
- returned: always
- type: dict
- sample: {
- "blob": "https://clh0003.blob.core.windows.net/",
- "queue": "https://clh0003.queue.core.windows.net/",
- "table": "https://clh0003.table.core.windows.net/"
- }
- primary_location:
- description:
- - The location of the primary data center for the storage account.
- returned: always
- type: str
- sample: eastus2
- provisioning_state:
- description:
- - The status of the storage account.
- - Possible values include C(Creating), C(ResolvingDNS), C(Succeeded).
- returned: always
- type: str
- sample: Succeeded
- resource_group:
- description:
- - The resource group's name.
- returned: always
- type: str
- sample: Testing
- secondary_endpoints:
- description:
- - The URLs to retrieve the public I(blob), I(queue), or I(table) object from the secondary location.
- returned: always
- type: dict
- sample: {
- "blob": "https://clh0003-secondary.blob.core.windows.net/",
- "queue": "https://clh0003-secondary.queue.core.windows.net/",
- "table": "https://clh0003-secondary.table.core.windows.net/"
- }
- secondary_location:
- description:
- - The location of the geo-replicated secondary for the storage account.
- returned: always
- type: str
- sample: centralus
- status_of_primary:
- description:
- - The status of the primary location of the storage account; either C(available) or C(unavailable).
- returned: always
- type: str
- sample: available
- status_of_secondary:
- description:
- - The status of the secondary location of the storage account; either C(available) or C(unavailable).
- returned: always
- type: str
- sample: available
- tags:
- description:
- - Resource tags.
- returned: always
- type: dict
- sample: { 'tags1': 'value1' }
- type:
- description:
- - The storage account type.
- returned: always
- type: str
- sample: "Microsoft.Storage/storageAccounts"
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.storage.cloudstorageaccount import CloudStorageAccount
- from azure.common import AzureMissingResourceHttpError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-import copy
-from ansible.module_utils.azure_rm_common import AZURE_SUCCESS_STATE, AzureRMModuleBase
-from ansible.module_utils._text import to_native
-
-cors_rule_spec = dict(
- allowed_origins=dict(type='list', elements='str', required=True),
- allowed_methods=dict(type='list', elements='str', required=True),
- max_age_in_seconds=dict(type='int', required=True),
- exposed_headers=dict(type='list', elements='str', required=True),
- allowed_headers=dict(type='list', elements='str', required=True),
-)
-
-
-def compare_cors(cors1, cors2):
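- # Order-insensitive comparison: each rule in cors1 must be matched by a distinct rule in
- # cors2 on all five fields, with the origin/method/header lists compared as sets.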
- if len(cors1) != len(cors2):
- return False
- copy2 = copy.copy(cors2)
- for rule1 in cors1:
- matched = False
- for rule2 in copy2:
- if (rule1['max_age_in_seconds'] == rule2['max_age_in_seconds']
- and set(rule1['allowed_methods']) == set(rule2['allowed_methods'])
- and set(rule1['allowed_origins']) == set(rule2['allowed_origins'])
- and set(rule1['allowed_headers']) == set(rule2['allowed_headers'])
- and set(rule1['exposed_headers']) == set(rule2['exposed_headers'])):
- matched = True
- copy2.remove(rule2)
- if not matched:
- return False
- return True
-
-
-class AzureRMStorageAccount(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- account_type=dict(type='str',
- choices=['Premium_LRS', 'Standard_GRS', 'Standard_LRS', 'StandardSSD_LRS', 'Standard_RAGRS', 'Standard_ZRS', 'Premium_ZRS'],
- aliases=['type']),
- custom_domain=dict(type='dict', aliases=['custom_dns_domain_suffix']),
- location=dict(type='str'),
- name=dict(type='str', required=True),
- resource_group=dict(required=True, type='str', aliases=['resource_group_name']),
- state=dict(default='present', choices=['present', 'absent']),
- force_delete_nonempty=dict(type='bool', default=False, aliases=['force']),
- tags=dict(type='dict'),
- kind=dict(type='str', default='Storage', choices=['Storage', 'StorageV2', 'BlobStorage']),
- access_tier=dict(type='str', choices=['Hot', 'Cool']),
- https_only=dict(type='bool', default=False),
- blob_cors=dict(type='list', options=cors_rule_spec, elements='dict')
- )
-
- self.results = dict(
- changed=False,
- state=dict()
- )
-
- self.account_dict = None
- self.resource_group = None
- self.name = None
- self.state = None
- self.location = None
- self.account_type = None
- self.custom_domain = None
- self.tags = None
- self.force_delete_nonempty = None
- self.kind = None
- self.access_tier = None
- self.https_only = None
- self.blob_cors = None
-
- super(AzureRMStorageAccount, self).__init__(self.module_arg_spec,
- supports_check_mode=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- # Set default location
- self.location = resource_group.location
-
- if len(self.name) < 3 or len(self.name) > 24:
- self.fail("Parameter error: name length must be between 3 and 24 characters.")
-
- if self.custom_domain:
- if self.custom_domain.get('name', None) is None:
- self.fail("Parameter error: expecting custom_domain to have a name attribute of type string.")
- if self.custom_domain.get('use_sub_domain', None) is None:
- self.fail("Parameter error: expecting custom_domain to have a use_sub_domain "
- "attribute of type boolean.")
-
- self.account_dict = self.get_account()
-
- if self.state == 'present' and self.account_dict and \
- self.account_dict['provisioning_state'] != AZURE_SUCCESS_STATE:
- self.fail("Error: storage account {0} has not completed provisioning. State is {1}. Expecting state "
- "to be {2}.".format(self.name, self.account_dict['provisioning_state'], AZURE_SUCCESS_STATE))
-
- if self.account_dict is not None:
- self.results['state'] = self.account_dict
- else:
- self.results['state'] = dict()
-
- if self.state == 'present':
- if not self.account_dict:
- self.results['state'] = self.create_account()
- else:
- self.update_account()
- elif self.state == 'absent' and self.account_dict:
- self.delete_account()
- self.results['state'] = dict(Status='Deleted')
-
- return self.results
-
- def check_name_availability(self):
- self.log('Checking name availability for {0}'.format(self.name))
- try:
- response = self.storage_client.storage_accounts.check_name_availability(self.name)
- except CloudError as e:
- self.log('Error attempting to validate name.')
- self.fail("Error checking name availability: {0}".format(str(e)))
- if not response.name_available:
- self.log('Error name not available.')
- self.fail("{0} - {1}".format(response.message, response.reason))
-
- def get_account(self):
- self.log('Get properties for account {0}'.format(self.name))
- account_obj = None
- blob_service_props = None
- account_dict = None
-
- try:
- account_obj = self.storage_client.storage_accounts.get_properties(self.resource_group, self.name)
- blob_service_props = self.storage_client.blob_services.get_service_properties(self.resource_group, self.name)
- except CloudError:
- pass
-
- if account_obj:
- account_dict = self.account_obj_to_dict(account_obj, blob_service_props)
-
- return account_dict
-
- def account_obj_to_dict(self, account_obj, blob_service_props=None):
- account_dict = dict(
- id=account_obj.id,
- name=account_obj.name,
- location=account_obj.location,
- resource_group=self.resource_group,
- type=account_obj.type,
- access_tier=(account_obj.access_tier.value
- if account_obj.access_tier is not None else None),
- sku_tier=account_obj.sku.tier.value,
- sku_name=account_obj.sku.name.value,
- provisioning_state=account_obj.provisioning_state.value,
- secondary_location=account_obj.secondary_location,
- status_of_primary=(account_obj.status_of_primary.value
- if account_obj.status_of_primary is not None else None),
- status_of_secondary=(account_obj.status_of_secondary.value
- if account_obj.status_of_secondary is not None else None),
- primary_location=account_obj.primary_location,
- https_only=account_obj.enable_https_traffic_only
- )
- account_dict['custom_domain'] = None
- if account_obj.custom_domain:
- account_dict['custom_domain'] = dict(
- name=account_obj.custom_domain.name,
- use_sub_domain=account_obj.custom_domain.use_sub_domain
- )
-
- account_dict['primary_endpoints'] = None
- if account_obj.primary_endpoints:
- account_dict['primary_endpoints'] = dict(
- blob=account_obj.primary_endpoints.blob,
- queue=account_obj.primary_endpoints.queue,
- table=account_obj.primary_endpoints.table
- )
- account_dict['secondary_endpoints'] = None
- if account_obj.secondary_endpoints:
- account_dict['secondary_endpoints'] = dict(
- blob=account_obj.secondary_endpoints.blob,
- queue=account_obj.secondary_endpoints.queue,
- table=account_obj.secondary_endpoints.table
- )
- account_dict['tags'] = None
- if account_obj.tags:
- account_dict['tags'] = account_obj.tags
- if blob_service_props and blob_service_props.cors and blob_service_props.cors.cors_rules:
- account_dict['blob_cors'] = [dict(
- allowed_origins=[to_native(y) for y in x.allowed_origins],
- allowed_methods=[to_native(y) for y in x.allowed_methods],
- max_age_in_seconds=x.max_age_in_seconds,
- exposed_headers=[to_native(y) for y in x.exposed_headers],
- allowed_headers=[to_native(y) for y in x.allowed_headers]
- ) for x in blob_service_props.cors.cors_rules]
- return account_dict
-
- def update_account(self):
- self.log('Update storage account {0}'.format(self.name))
- if bool(self.https_only) != bool(self.account_dict.get('https_only')):
- self.results['changed'] = True
- self.account_dict['https_only'] = self.https_only
- if not self.check_mode:
- try:
- parameters = self.storage_models.StorageAccountUpdateParameters(enable_https_traffic_only=self.https_only)
- self.storage_client.storage_accounts.update(self.resource_group,
- self.name,
- parameters)
- except Exception as exc:
- self.fail("Failed to update account type: {0}".format(str(exc)))
-
- if self.account_type:
- if self.account_type != self.account_dict['sku_name']:
- # change the account type
- SkuName = self.storage_models.SkuName
- if self.account_dict['sku_name'] in [SkuName.premium_lrs, SkuName.standard_zrs]:
- self.fail("Storage accounts of type {0} and {1} cannot be changed.".format(
- SkuName.premium_lrs, SkuName.standard_zrs))
- if self.account_type in [SkuName.premium_lrs, SkuName.standard_zrs]:
- self.fail("Storage account of type {0} cannot be changed to a type of {1} or {2}.".format(
- self.account_dict['sku_name'], SkuName.premium_lrs, SkuName.standard_zrs))
-
- self.results['changed'] = True
- self.account_dict['sku_name'] = self.account_type
-
- if self.results['changed'] and not self.check_mode:
- # Perform the update. The API only allows changing one attribute per call.
- try:
- self.log("sku_name: %s" % self.account_dict['sku_name'])
- self.log("sku_tier: %s" % self.account_dict['sku_tier'])
- sku = self.storage_models.Sku(name=SkuName(self.account_dict['sku_name']))
- sku.tier = self.storage_models.SkuTier(self.account_dict['sku_tier'])
- parameters = self.storage_models.StorageAccountUpdateParameters(sku=sku)
- self.storage_client.storage_accounts.update(self.resource_group,
- self.name,
- parameters)
- except Exception as exc:
- self.fail("Failed to update account type: {0}".format(str(exc)))
-
- if self.custom_domain:
- if not self.account_dict['custom_domain'] or self.account_dict['custom_domain'] != self.custom_domain:
- self.results['changed'] = True
- self.account_dict['custom_domain'] = self.custom_domain
-
- if self.results['changed'] and not self.check_mode:
- new_domain = self.storage_models.CustomDomain(name=self.custom_domain['name'],
- use_sub_domain=self.custom_domain['use_sub_domain'])
- parameters = self.storage_models.StorageAccountUpdateParameters(custom_domain=new_domain)
- try:
- self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters)
- except Exception as exc:
- self.fail("Failed to update custom domain: {0}".format(str(exc)))
-
- if self.access_tier:
- if not self.account_dict['access_tier'] or self.account_dict['access_tier'] != self.access_tier:
- self.results['changed'] = True
- self.account_dict['access_tier'] = self.access_tier
-
- if self.results['changed'] and not self.check_mode:
- parameters = self.storage_models.StorageAccountUpdateParameters(access_tier=self.access_tier)
- try:
- self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters)
- except Exception as exc:
- self.fail("Failed to update access tier: {0}".format(str(exc)))
-
- update_tags, self.account_dict['tags'] = self.update_tags(self.account_dict['tags'])
- if update_tags:
- self.results['changed'] = True
- if not self.check_mode:
- parameters = self.storage_models.StorageAccountUpdateParameters(tags=self.account_dict['tags'])
- try:
- self.storage_client.storage_accounts.update(self.resource_group, self.name, parameters)
- except Exception as exc:
- self.fail("Failed to update tags: {0}".format(str(exc)))
-
- if self.blob_cors and not compare_cors(self.account_dict.get('blob_cors', []), self.blob_cors):
- self.results['changed'] = True
- if not self.check_mode:
- self.set_blob_cors()
-
- def create_account(self):
- self.log("Creating account {0}".format(self.name))
-
- if not self.location:
- self.fail('Parameter error: location required when creating a storage account.')
-
- if not self.account_type:
- self.fail('Parameter error: account_type required when creating a storage account.')
-
- if not self.access_tier and self.kind == 'BlobStorage':
- self.fail('Parameter error: access_tier required when creating a storage account of type BlobStorage.')
-
- self.check_name_availability()
- self.results['changed'] = True
-
- if self.check_mode:
- account_dict = dict(
- location=self.location,
- account_type=self.account_type,
- name=self.name,
- resource_group=self.resource_group,
- enable_https_traffic_only=self.https_only,
- tags=dict()
- )
- if self.tags:
- account_dict['tags'] = self.tags
- if self.blob_cors:
- account_dict['blob_cors'] = self.blob_cors
- return account_dict
- sku = self.storage_models.Sku(name=self.storage_models.SkuName(self.account_type))
- sku.tier = self.storage_models.SkuTier.standard if 'Standard' in self.account_type else \
- self.storage_models.SkuTier.premium
- parameters = self.storage_models.StorageAccountCreateParameters(sku=sku,
- kind=self.kind,
- location=self.location,
- tags=self.tags,
- access_tier=self.access_tier)
- self.log(str(parameters))
- try:
- poller = self.storage_client.storage_accounts.create(self.resource_group, self.name, parameters)
- self.get_poller_result(poller)
- except CloudError as e:
- self.log('Error creating storage account.')
- self.fail("Failed to create account: {0}".format(str(e)))
- if self.blob_cors:
- self.set_blob_cors()
- # the poller doesn't actually return anything
- return self.get_account()
-
- def delete_account(self):
- if self.account_dict['provisioning_state'] == self.storage_models.ProvisioningState.succeeded.value and \
- not self.force_delete_nonempty and self.account_has_blob_containers():
- self.fail("Account contains blob containers. Is it in use? Use the force_delete_nonempty option to attempt deletion.")
-
- self.log('Delete storage account {0}'.format(self.name))
- self.results['changed'] = True
- if not self.check_mode:
- try:
- status = self.storage_client.storage_accounts.delete(self.resource_group, self.name)
- self.log("delete status: ")
- self.log(str(status))
- except CloudError as e:
- self.fail("Failed to delete the account: {0}".format(str(e)))
- return True
-
- def account_has_blob_containers(self):
- '''
- If there are blob containers, then there are likely VMs depending on this account and it should
- not be deleted.
- '''
- self.log('Checking for existing blob containers')
- blob_service = self.get_blob_client(self.resource_group, self.name)
- try:
- response = blob_service.list_containers()
- except AzureMissingResourceHttpError:
- # No blob storage available?
- return False
-
- if len(response.items) > 0:
- return True
- return False
-
- def set_blob_cors(self):
- try:
- cors_rules = self.storage_models.CorsRules(cors_rules=[self.storage_models.CorsRule(**x) for x in self.blob_cors])
- self.storage_client.blob_services.set_service_properties(self.resource_group,
- self.name,
- self.storage_models.BlobServiceProperties(cors=cors_rules))
- except Exception as exc:
- self.fail("Failed to set CORS rules: {0}".format(str(exc)))
-
-
-def main():
- AzureRMStorageAccount()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_storageaccount_info.py b/lib/ansible/modules/cloud/azure/azure_rm_storageaccount_info.py
deleted file mode 100644
index 5849e9e47f..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_storageaccount_info.py
+++ /dev/null
@@ -1,557 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_storageaccount_info
-
-version_added: "2.9"
-
-short_description: Get storage account facts
-
-description:
- - Get facts for one storage account or all storage accounts within a resource group.
-
-options:
- name:
- description:
- - Only show results for a specific account.
- resource_group:
- description:
- - Limit results to a resource group. Required when filtering by name.
- aliases:
- - resource_group_name
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
- show_connection_string:
- description:
- - Show the connection string for each of the storageaccount's endpoints.
-            - For convenience, C(show_connection_string) will also show the access keys for each of the storage account's endpoints.
-            - Note that listing all storage accounts is much slower than querying a single one.
- type: bool
- version_added: "2.8"
- show_blob_cors:
- description:
- - Show the blob CORS settings for each blob related to the storage account.
- - Querying all storage accounts will take a long time.
- type: bool
- version_added: "2.8"
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Chris Houseknecht (@chouseknecht)
- - Matt Davis (@nitzmahone)
-
-'''
-
-EXAMPLES = '''
- - name: Get facts for one account
- azure_rm_storageaccount_info:
- resource_group: myResourceGroup
- name: clh0002
-
- - name: Get facts for all accounts in a resource group
- azure_rm_storageaccount_info:
- resource_group: myResourceGroup
-
- - name: Get facts for all accounts by tags
- azure_rm_storageaccount_info:
- tags:
- - testing
- - foo:bar
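-
- # An illustrative sketch of the show_connection_string and show_blob_cors options documented
- # above; the resource group and account name reuse the placeholders from the first example.
- - name: Get facts for one account including connection strings and blob CORS rules
-    azure_rm_storageaccount_info:
-      resource_group: myResourceGroup
-      name: clh0002
-      show_connection_string: true
-      show_blob_cors: true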
-'''
-
-RETURN = '''
-azure_storageaccounts:
- description:
- - List of storage account dicts.
- returned: always
- type: list
- example: [{
-        "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/testing/providers/Microsoft.Storage/storageAccounts/testaccount001",
- "location": "eastus2",
- "name": "testaccount001",
- "properties": {
- "accountType": "Standard_LRS",
- "creationTime": "2016-03-28T02:46:58.290113Z",
- "primaryEndpoints": {
- "blob": "https://testaccount001.blob.core.windows.net/",
- "file": "https://testaccount001.file.core.windows.net/",
- "queue": "https://testaccount001.queue.core.windows.net/",
- "table": "https://testaccount001.table.core.windows.net/"
- },
- "primaryLocation": "eastus2",
- "provisioningState": "Succeeded",
- "statusOfPrimary": "Available"
- },
- "tags": {},
- "type": "Microsoft.Storage/storageAccounts"
- }]
-storageaccounts:
- description:
- - List of storage account dicts in resource module's parameter format.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
-            sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Storage/storageAccounts/testaccount001"
- name:
- description:
- - Name of the storage account to update or create.
- returned: always
- type: str
- sample: testaccount001
- location:
- description:
- - Valid Azure location. Defaults to location of the resource group.
- returned: always
- type: str
- sample: eastus
- account_type:
- description:
- - Type of storage account.
- - C(Standard_ZRS) and C(Premium_LRS) accounts cannot be changed to other account types.
- - Other account types cannot be changed to C(Standard_ZRS) or C(Premium_LRS).
- returned: always
- type: str
- sample: Standard_ZRS
- custom_domain:
- description:
- - User domain assigned to the storage account.
- - Must be a dictionary with I(name) and I(use_sub_domain) keys where I(name) is the CNAME source.
- returned: always
- type: complex
- contains:
- name:
- description:
- - CNAME source.
- returned: always
- type: str
- sample: testaccount
- use_sub_domain:
- description:
- - Whether to use sub domain.
- returned: always
- type: bool
- sample: true
- kind:
- description:
- - The kind of storage.
- returned: always
- type: str
- sample: Storage
- access_tier:
- description:
- - The access tier for this storage account.
- returned: always
- type: str
- sample: Hot
- https_only:
- description:
- - Allows https traffic only to storage service when set to C(true).
- returned: always
- type: bool
- sample: false
- provisioning_state:
- description:
- - The status of the storage account at the time the operation was called.
- - Possible values include C(Creating), C(ResolvingDNS), C(Succeeded).
- returned: always
- type: str
- sample: Succeeded
- secondary_location:
- description:
- - The location of the geo-replicated secondary for the storage account.
- - Only available if the I(account_type=Standard_GRS) or I(account_type=Standard_RAGRS).
- returned: always
- type: str
- sample: westus
- status_of_primary:
- description:
- - Status of the primary location of the storage account; either C(available) or C(unavailable).
- returned: always
- type: str
- sample: available
- status_of_secondary:
- description:
- - Status of the secondary location of the storage account; either C(available) or C(unavailable).
- returned: always
- type: str
- sample: available
- primary_location:
- description:
- - The location of the primary data center for the storage account.
- returned: always
- type: str
- sample: eastus
- primary_endpoints:
- description:
- - URLs to retrieve a public I(blob), I(queue), or I(table) object.
- - Note that C(Standard_ZRS) and C(Premium_LRS) accounts only return the blob endpoint.
- returned: always
- type: complex
- contains:
- blob:
- description:
- - The primary blob endpoint and connection string.
- returned: always
- type: complex
- contains:
- endpoint:
- description:
- - The primary blob endpoint.
- returned: always
- type: str
- sample: "https://testaccount001.blob.core.windows.net/"
- connectionstring:
- description:
- - Connectionstring of the blob endpoint.
- returned: always
- type: str
- sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;BlobEndpoint=X"
- queue:
- description:
- - The primary queue endpoint and connection string.
- returned: always
- type: complex
- contains:
- endpoint:
- description:
- - The primary queue endpoint.
- returned: always
- type: str
- sample: "https://testaccount001.queue.core.windows.net/"
- connectionstring:
- description:
- - Connectionstring of the queue endpoint.
- returned: always
- type: str
- sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;QueueEndpoint=X"
- table:
- description:
- - The primary table endpoint and connection string.
- returned: always
- type: complex
- contains:
- endpoint:
- description:
- - The primary table endpoint.
- returned: always
- type: str
- sample: "https://testaccount001.table.core.windows.net/"
- connectionstring:
- description:
- - Connectionstring of the table endpoint.
- returned: always
- type: str
- sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;TableEndpoint=X"
- key:
- description:
- - The account key for the primary_endpoints
- returned: always
- type: str
- sample: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- secondary_endpoints:
- description:
- - The URLs to retrieve a public I(blob), I(queue), or I(table) object from the secondary location.
- - Only available if the SKU I(name=Standard_RAGRS).
- returned: always
- type: complex
- contains:
- blob:
- description:
- - The secondary blob endpoint and connection string.
- returned: always
- type: complex
- contains:
- endpoint:
- description:
- - The secondary blob endpoint.
- returned: always
- type: str
- sample: "https://testaccount001.blob.core.windows.net/"
- connectionstring:
- description:
- - Connectionstring of the blob endpoint.
- returned: always
- type: str
- sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;BlobEndpoint=X"
- queue:
- description:
- - The secondary queue endpoint and connection string.
- returned: always
- type: complex
- contains:
- endpoint:
- description:
- - The secondary queue endpoint.
- returned: always
- type: str
- sample: "https://testaccount001.queue.core.windows.net/"
- connectionstring:
- description:
- - Connectionstring of the queue endpoint.
- returned: always
- type: str
- sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;QueueEndpoint=X"
- table:
- description:
- - The secondary table endpoint and connection string.
- returned: always
- type: complex
- contains:
- endpoint:
- description:
- - The secondary table endpoint.
- returned: always
- type: str
- sample: "https://testaccount001.table.core.windows.net/"
- connectionstring:
- description:
- - Connectionstring of the table endpoint.
- returned: always
- type: str
- sample: "DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net;AccountName=X;AccountKey=X;TableEndpoint=X"
- key:
- description:
- - The account key for the secondary_endpoints
-                returned: always
-                type: str
-                sample: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- tags:
- description:
- - Resource tags.
- returned: always
- type: dict
- sample: { "tag1": "abc" }
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils._text import to_native
-
-
-AZURE_OBJECT_CLASS = 'StorageAccount'
-
-
-class AzureRMStorageAccountInfo(AzureRMModuleBase):
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str'),
- resource_group=dict(type='str', aliases=['resource_group_name']),
- tags=dict(type='list'),
- show_connection_string=dict(type='bool'),
- show_blob_cors=dict(type='bool')
- )
-
- self.results = dict(
- changed=False,
- storageaccounts=[]
- )
-
- self.name = None
- self.resource_group = None
- self.tags = None
- self.show_connection_string = None
- self.show_blob_cors = None
-
- super(AzureRMStorageAccountInfo, self).__init__(self.module_arg_spec,
- supports_tags=False,
- facts_module=True)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_storageaccount_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_storageaccount_facts' module has been renamed to 'azure_rm_storageaccount_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name and not self.resource_group:
- self.fail("Parameter error: resource group required when filtering by name.")
-
- results = []
- if self.name:
- results = self.get_account()
- elif self.resource_group:
- results = self.list_resource_group()
- else:
- results = self.list_all()
-
- filtered = self.filter_tag(results)
-
- if is_old_facts:
- self.results['ansible_facts'] = {
- 'azure_storageaccounts': self.serialize(filtered),
- 'storageaccounts': self.format_to_dict(filtered),
- }
- self.results['storageaccounts'] = self.format_to_dict(filtered)
- return self.results
-
- def get_account(self):
- self.log('Get properties for account {0}'.format(self.name))
- account = None
- try:
- account = self.storage_client.storage_accounts.get_properties(self.resource_group, self.name)
- return [account]
- except CloudError:
- pass
- return []
-
- def list_resource_group(self):
- self.log('List items')
- try:
- response = self.storage_client.storage_accounts.list_by_resource_group(self.resource_group)
- except Exception as exc:
- self.fail("Error listing for resource group {0} - {1}".format(self.resource_group, str(exc)))
-
- return response
-
- def list_all(self):
- self.log('List all items')
- try:
- response = self.storage_client.storage_accounts.list()
- except Exception as exc:
- self.fail("Error listing all items - {0}".format(str(exc)))
-
- return response
-
- def filter_tag(self, raw):
- return [item for item in raw if self.has_tags(item.tags, self.tags)]
-
- def serialize(self, raw):
- return [self.serialize_obj(item, AZURE_OBJECT_CLASS) for item in raw]
-
- def format_to_dict(self, raw):
- return [self.account_obj_to_dict(item) for item in raw]
-
- def account_obj_to_dict(self, account_obj, blob_service_props=None):
- account_dict = dict(
- id=account_obj.id,
- name=account_obj.name,
- location=account_obj.location,
- access_tier=(account_obj.access_tier.value
- if account_obj.access_tier is not None else None),
- account_type=account_obj.sku.name.value,
- kind=account_obj.kind.value if account_obj.kind else None,
- provisioning_state=account_obj.provisioning_state.value,
- secondary_location=account_obj.secondary_location,
- status_of_primary=(account_obj.status_of_primary.value
- if account_obj.status_of_primary is not None else None),
- status_of_secondary=(account_obj.status_of_secondary.value
- if account_obj.status_of_secondary is not None else None),
- primary_location=account_obj.primary_location,
- https_only=account_obj.enable_https_traffic_only
- )
-
- id_dict = self.parse_resource_to_dict(account_obj.id)
- account_dict['resource_group'] = id_dict.get('resource_group')
- account_key = self.get_connectionstring(account_dict['resource_group'], account_dict['name'])
- account_dict['custom_domain'] = None
- if account_obj.custom_domain:
- account_dict['custom_domain'] = dict(
- name=account_obj.custom_domain.name,
- use_sub_domain=account_obj.custom_domain.use_sub_domain
- )
-
- account_dict['primary_endpoints'] = None
- if account_obj.primary_endpoints:
- account_dict['primary_endpoints'] = dict(
- blob=self.format_endpoint_dict(account_dict['name'], account_key[0], account_obj.primary_endpoints.blob, 'blob'),
- queue=self.format_endpoint_dict(account_dict['name'], account_key[0], account_obj.primary_endpoints.queue, 'queue'),
- table=self.format_endpoint_dict(account_dict['name'], account_key[0], account_obj.primary_endpoints.table, 'table')
- )
- if account_key[0]:
- account_dict['primary_endpoints']['key'] = '{0}'.format(account_key[0])
- account_dict['secondary_endpoints'] = None
- if account_obj.secondary_endpoints:
- account_dict['secondary_endpoints'] = dict(
-                blob=self.format_endpoint_dict(account_dict['name'], account_key[1], account_obj.secondary_endpoints.blob, 'blob'),
-                queue=self.format_endpoint_dict(account_dict['name'], account_key[1], account_obj.secondary_endpoints.queue, 'queue'),
-                table=self.format_endpoint_dict(account_dict['name'], account_key[1], account_obj.secondary_endpoints.table, 'table'),
- )
- if account_key[1]:
- account_dict['secondary_endpoints']['key'] = '{0}'.format(account_key[1])
- account_dict['tags'] = None
- if account_obj.tags:
- account_dict['tags'] = account_obj.tags
- blob_service_props = self.get_blob_service_props(account_dict['resource_group'], account_dict['name'])
- if blob_service_props and blob_service_props.cors and blob_service_props.cors.cors_rules:
- account_dict['blob_cors'] = [dict(
- allowed_origins=to_native(x.allowed_origins),
- allowed_methods=to_native(x.allowed_methods),
- max_age_in_seconds=x.max_age_in_seconds,
- exposed_headers=to_native(x.exposed_headers),
- allowed_headers=to_native(x.allowed_headers)
- ) for x in blob_service_props.cors.cors_rules]
- return account_dict
-
- def format_endpoint_dict(self, name, key, endpoint, storagetype, protocol='https'):
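-        # Build the endpoint dict; when an account key is available, also assemble the standard
-        # connection string from the protocol, the cloud's storage endpoint suffix, the account
-        # name/key and the service-specific endpoint (e.g. BlobEndpoint).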
- result = dict(endpoint=endpoint)
- if key:
- result['connectionstring'] = 'DefaultEndpointsProtocol={0};EndpointSuffix={1};AccountName={2};AccountKey={3};{4}Endpoint={5}'.format(
- protocol,
- self._cloud_environment.suffixes.storage_endpoint,
- name,
- key,
- str.title(storagetype),
- endpoint)
- return result
-
- def get_blob_service_props(self, resource_group, name):
- if not self.show_blob_cors:
- return None
- try:
- blob_service_props = self.storage_client.blob_services.get_service_properties(resource_group, name)
- return blob_service_props
- except Exception:
- pass
- return None
-
- def get_connectionstring(self, resource_group, name):
- keys = ['', '']
- if not self.show_connection_string:
- return keys
- try:
- cred = self.storage_client.storage_accounts.list_keys(resource_group, name)
-            # the following try/except pattern is borrowed from the Azure CLI
- try:
- keys = [cred.keys[0].value, cred.keys[1].value]
- except AttributeError:
- keys = [cred.key1, cred.key2]
- except Exception:
- pass
- return keys
-
-
-def main():
- AzureRMStorageAccountInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_storageblob.py b/lib/ansible/modules/cloud/azure/azure_rm_storageblob.py
deleted file mode 100644
index d77255d9cb..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_storageblob.py
+++ /dev/null
@@ -1,548 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_storageblob
-short_description: Manage blob containers and blob objects
-version_added: "2.1"
-description:
- - Create, update and delete blob containers and blob objects.
- - Use to upload a file and store it as a blob object, or download a blob object to a file.
-options:
- storage_account_name:
- description:
- - Name of the storage account to use.
- required: true
- aliases:
- - account_name
- - storage_account
- blob:
- description:
- - Name of a blob object within the container.
- aliases:
- - blob_name
- blob_type:
- description:
- - Type of blob object.
- default: block
- choices:
- - block
- - page
- version_added: "2.5"
- container:
- description:
- - Name of a blob container within the storage account.
- required: true
- aliases:
- - container_name
- content_type:
- description:
- - Set the blob content-type header. For example C(image/png).
- cache_control:
- description:
- - Set the blob cache-control header.
- content_disposition:
- description:
- - Set the blob content-disposition header.
- content_encoding:
- description:
- - Set the blob encoding header.
- content_language:
- description:
- - Set the blob content-language header.
- content_md5:
- description:
- - Set the blob md5 hash value.
- dest:
- description:
- - Destination file path. Use with state C(present) to download a blob.
- aliases:
- - destination
- force:
- description:
- - Overwrite existing blob or file when uploading or downloading. Force deletion of a container that contains blobs.
- type: bool
- default: no
- resource_group:
- description:
- - Name of the resource group to use.
- required: true
- aliases:
- - resource_group_name
- src:
- description:
- - Source file path. Use with state C(present) to upload a blob.
- aliases:
- - source
- state:
- description:
- - State of a container or blob.
- - Use state C(absent) with a container value only to delete a container. Include a blob value to remove
-          a specific blob. A container will not be deleted if it contains blobs. Use the I(force) option to override,
- deleting the container and all associated blobs.
- - Use state C(present) to create or update a container and upload or download a blob. If the container
- does not exist, it will be created. If it exists, it will be updated with configuration options. Provide
- a blob name and either src or dest to upload or download. Provide a src path to upload and a dest path
- to download. If a blob (uploading) or a file (downloading) already exists, it will not be overwritten
- unless I(force=true).
- default: present
- choices:
- - absent
- - present
- public_access:
- description:
- - A container's level of public access. By default containers are private.
- - Can only be set at time of container creation.
- choices:
- - container
- - blob
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Chris Houseknecht (@chouseknecht)
- - Matt Davis (@nitzmahone)
-
-'''
-
-EXAMPLES = '''
-- name: Remove container foo
- azure_rm_storageblob:
- resource_group: myResourceGroup
- storage_account_name: clh0002
- container: foo
- state: absent
-
-- name: Create container foo and upload a file
- azure_rm_storageblob:
- resource_group: myResourceGroup
- storage_account_name: clh0002
- container: foo
- blob: graylog.png
- src: ./files/graylog.png
- public_access: container
- content_type: 'application/image'
-
-- name: Download the file
- azure_rm_storageblob:
- resource_group: myResourceGroup
- storage_account_name: clh0002
- container: foo
- blob: graylog.png
- dest: ~/tmp/images/graylog.png
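-
-# Illustrative sketches of the state/force behaviour described above; all values reuse the
-# examples in this block.
-- name: Remove a single blob from container foo
-  azure_rm_storageblob:
-    resource_group: myResourceGroup
-    storage_account_name: clh0002
-    container: foo
-    blob: graylog.png
-    state: absent
-
-- name: Download the file again, overwriting any existing local copy
-  azure_rm_storageblob:
-    resource_group: myResourceGroup
-    storage_account_name: clh0002
-    container: foo
-    blob: graylog.png
-    dest: ~/tmp/images/graylog.png
-    force: yes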
-'''
-
-RETURN = '''
-blob:
- description:
- - Facts about the current state of the blob.
- returned: when a blob is operated on
- type: dict
- sample: {
- "content_length": 136532,
- "content_settings": {
- "cache_control": null,
- "content_disposition": null,
- "content_encoding": null,
- "content_language": null,
- "content_md5": null,
- "content_type": "application/image"
- },
- "last_modified": "09-Mar-2016 22:08:25 +0000",
- "name": "graylog.png",
- "tags": {},
- "type": "BlockBlob"
- }
-container:
- description:
- - Facts about the current state of the selected container.
- returned: always
- type: dict
- sample: {
- "last_modified": "09-Mar-2016 19:28:26 +0000",
- "name": "foo",
- "tags": {}
- }
-'''
-
-import os
-
-try:
- from azure.storage.blob.models import ContentSettings
- from azure.common import AzureMissingResourceHttpError, AzureHttpError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-
-class AzureRMStorageBlob(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- storage_account_name=dict(required=True, type='str', aliases=['account_name', 'storage_account']),
- blob=dict(type='str', aliases=['blob_name']),
- blob_type=dict(type='str', default='block', choices=['block', 'page']),
- container=dict(required=True, type='str', aliases=['container_name']),
- dest=dict(type='path', aliases=['destination']),
- force=dict(type='bool', default=False),
- resource_group=dict(required=True, type='str', aliases=['resource_group_name']),
- src=dict(type='str', aliases=['source']),
- state=dict(type='str', default='present', choices=['absent', 'present']),
- public_access=dict(type='str', choices=['container', 'blob']),
- content_type=dict(type='str'),
- content_encoding=dict(type='str'),
- content_language=dict(type='str'),
- content_disposition=dict(type='str'),
- cache_control=dict(type='str'),
- content_md5=dict(type='str'),
- )
-
- mutually_exclusive = [('src', 'dest')]
-
- self.blob_client = None
- self.blob_details = None
- self.storage_account_name = None
- self.blob = None
- self.blob_obj = None
- self.blob_type = None
- self.container = None
- self.container_obj = None
- self.dest = None
- self.force = None
- self.resource_group = None
- self.src = None
- self.state = None
- self.tags = None
- self.public_access = None
- self.results = dict(
- changed=False,
- actions=[],
- container=dict(),
- blob=dict()
- )
-
- super(AzureRMStorageBlob, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- mutually_exclusive=mutually_exclusive,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- self.results['check_mode'] = self.check_mode
-
- # add file path validation
-
- self.blob_client = self.get_blob_client(self.resource_group, self.storage_account_name, self.blob_type)
- self.container_obj = self.get_container()
-
- if self.blob is not None:
- self.blob_obj = self.get_blob()
-
- if self.state == 'present':
- if not self.container_obj:
- # create the container
- self.create_container()
- elif self.container_obj and not self.blob:
- # update container attributes
- update_tags, self.container_obj['tags'] = self.update_tags(self.container_obj.get('tags'))
- if update_tags:
- self.update_container_tags(self.container_obj['tags'])
-
- if self.blob:
- # create, update or download blob
- if self.src and self.src_is_valid():
- if self.blob_obj and not self.force:
- self.log("Cannot upload to {0}. Blob with that name already exists. "
- "Use the force option".format(self.blob))
- else:
- self.upload_blob()
- elif self.dest and self.dest_is_valid():
- self.download_blob()
-
- update_tags, self.blob_obj['tags'] = self.update_tags(self.blob_obj.get('tags'))
- if update_tags:
- self.update_blob_tags(self.blob_obj['tags'])
-
- if self.blob_content_settings_differ():
- self.update_blob_content_settings()
-
- elif self.state == 'absent':
- if self.container_obj and not self.blob:
- # Delete container
- if self.container_has_blobs():
- if self.force:
- self.delete_container()
- else:
- self.log("Cannot delete container {0}. It contains blobs. Use the force option.".format(
- self.container))
- else:
- self.delete_container()
- elif self.container_obj and self.blob_obj:
- # Delete blob
- self.delete_blob()
-
- # until we sort out how we want to do this globally
- del self.results['actions']
- return self.results
-
- def get_container(self):
- result = {}
- container = None
- if self.container:
- try:
- container = self.blob_client.get_container_properties(self.container)
- except AzureMissingResourceHttpError:
- pass
- if container:
- result = dict(
- name=container.name,
- tags=container.metadata,
- last_modified=container.properties.last_modified.strftime('%d-%b-%Y %H:%M:%S %z'),
- )
- return result
-
- def get_blob(self):
- result = dict()
- blob = None
- if self.blob:
- try:
- blob = self.blob_client.get_blob_properties(self.container, self.blob)
- except AzureMissingResourceHttpError:
- pass
- if blob:
- result = dict(
- name=blob.name,
- tags=blob.metadata,
- last_modified=blob.properties.last_modified.strftime('%d-%b-%Y %H:%M:%S %z'),
- type=blob.properties.blob_type,
- content_length=blob.properties.content_length,
- content_settings=dict(
- content_type=blob.properties.content_settings.content_type,
- content_encoding=blob.properties.content_settings.content_encoding,
- content_language=blob.properties.content_settings.content_language,
- content_disposition=blob.properties.content_settings.content_disposition,
- cache_control=blob.properties.content_settings.cache_control,
- content_md5=blob.properties.content_settings.content_md5
- )
- )
- return result
-
- def create_container(self):
- self.log('Create container %s' % self.container)
-
- tags = None
- if not self.blob and self.tags:
- # when a blob is present, then tags are assigned at the blob level
- tags = self.tags
-
- if not self.check_mode:
- try:
- self.blob_client.create_container(self.container, metadata=tags, public_access=self.public_access)
- except AzureHttpError as exc:
- self.fail("Error creating container {0} - {1}".format(self.container, str(exc)))
- self.container_obj = self.get_container()
- self.results['changed'] = True
- self.results['actions'].append('created container {0}'.format(self.container))
- self.results['container'] = self.container_obj
-
- def upload_blob(self):
- content_settings = None
- if self.content_type or self.content_encoding or self.content_language or self.content_disposition or \
- self.cache_control or self.content_md5:
- content_settings = ContentSettings(
- content_type=self.content_type,
- content_encoding=self.content_encoding,
- content_language=self.content_language,
- content_disposition=self.content_disposition,
- cache_control=self.cache_control,
- content_md5=self.content_md5
- )
- if not self.check_mode:
- try:
- self.blob_client.create_blob_from_path(self.container, self.blob, self.src,
- metadata=self.tags, content_settings=content_settings)
- except AzureHttpError as exc:
- self.fail("Error creating blob {0} - {1}".format(self.blob, str(exc)))
-
- self.blob_obj = self.get_blob()
- self.results['changed'] = True
- self.results['actions'].append('created blob {0} from {1}'.format(self.blob, self.src))
- self.results['container'] = self.container_obj
- self.results['blob'] = self.blob_obj
-
- def download_blob(self):
- if not self.check_mode:
- try:
- self.blob_client.get_blob_to_path(self.container, self.blob, self.dest)
- except Exception as exc:
- self.fail("Failed to download blob {0}:{1} to {2} - {3}".format(self.container,
- self.blob,
- self.dest,
- exc))
- self.results['changed'] = True
- self.results['actions'].append('downloaded blob {0}:{1} to {2}'.format(self.container,
- self.blob,
- self.dest))
-
- self.results['container'] = self.container_obj
- self.results['blob'] = self.blob_obj
-
- def src_is_valid(self):
- if not os.path.isfile(self.src):
- self.fail("The source path must be a file.")
- if os.access(self.src, os.R_OK):
- return True
- self.fail("Failed to access {0}. Make sure the file exists and that you have "
- "read access.".format(self.src))
-
- def dest_is_valid(self):
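-        # When not in check mode, normalise self.dest: append the blob name when a directory is
-        # given and create any missing parent directories, then refuse to overwrite an existing
-        # file unless force is set.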
- if not self.check_mode:
- if not os.path.basename(self.dest):
- # dest is a directory
- if os.path.isdir(self.dest):
- self.log("Path is dir. Appending blob name.")
- self.dest += self.blob
- else:
- try:
- self.log('Attempting to makedirs {0}'.format(self.dest))
- os.makedirs(self.dest)
- except IOError as exc:
- self.fail("Failed to create directory {0} - {1}".format(self.dest, str(exc)))
- self.dest += self.blob
- else:
- # does path exist without basename
- file_name = os.path.basename(self.dest)
- path = self.dest.replace(file_name, '')
- self.log('Checking path {0}'.format(path))
- if not os.path.isdir(path):
- try:
- self.log('Attempting to makedirs {0}'.format(path))
- os.makedirs(path)
- except IOError as exc:
- self.fail("Failed to create directory {0} - {1}".format(path, str(exc)))
- self.log('Checking final path {0}'.format(self.dest))
- if os.path.isfile(self.dest) and not self.force:
- # dest already exists and we're not forcing
- self.log("Dest {0} already exists. Cannot download. Use the force option.".format(self.dest))
- return False
- return True
-
- def delete_container(self):
- if not self.check_mode:
- try:
- self.blob_client.delete_container(self.container)
- except AzureHttpError as exc:
- self.fail("Error deleting container {0} - {1}".format(self.container, str(exc)))
-
- self.results['changed'] = True
- self.results['actions'].append('deleted container {0}'.format(self.container))
-
- def container_has_blobs(self):
- try:
- list_generator = self.blob_client.list_blobs(self.container)
- except AzureHttpError as exc:
-            self.fail("Error listing blobs in {0} - {1}".format(self.container, str(exc)))
- if len(list_generator.items) > 0:
- return True
- return False
-
- def delete_blob(self):
- if not self.check_mode:
- try:
- self.blob_client.delete_blob(self.container, self.blob)
- except AzureHttpError as exc:
- self.fail("Error deleting blob {0}:{1} - {2}".format(self.container, self.blob, str(exc)))
-
- self.results['changed'] = True
- self.results['actions'].append('deleted blob {0}:{1}'.format(self.container, self.blob))
- self.results['container'] = self.container_obj
-
- def update_container_tags(self, tags):
- if not self.check_mode:
- try:
- self.blob_client.set_container_metadata(self.container, metadata=tags)
- except AzureHttpError as exc:
- self.fail("Error updating container tags {0} - {1}".format(self.container, str(exc)))
- self.container_obj = self.get_container()
- self.results['changed'] = True
- self.results['actions'].append("updated container {0} tags.".format(self.container))
- self.results['container'] = self.container_obj
-
- def update_blob_tags(self, tags):
- if not self.check_mode:
- try:
- self.blob_client.set_blob_metadata(self.container, self.blob, metadata=tags)
- except AzureHttpError as exc:
-                self.fail("Error updating blob tags {0}:{1} - {2}".format(self.container, self.blob, str(exc)))
- self.blob_obj = self.get_blob()
- self.results['changed'] = True
- self.results['actions'].append("updated blob {0}:{1} tags.".format(self.container, self.blob))
- self.results['container'] = self.container_obj
- self.results['blob'] = self.blob_obj
-
- def blob_content_settings_differ(self):
- if self.content_type or self.content_encoding or self.content_language or self.content_disposition or \
- self.cache_control or self.content_md5:
- settings = dict(
- content_type=self.content_type,
- content_encoding=self.content_encoding,
- content_language=self.content_language,
- content_disposition=self.content_disposition,
- cache_control=self.cache_control,
- content_md5=self.content_md5
- )
- if self.blob_obj['content_settings'] != settings:
- return True
-
- return False
-
- def update_blob_content_settings(self):
- content_settings = ContentSettings(
- content_type=self.content_type,
- content_encoding=self.content_encoding,
- content_language=self.content_language,
- content_disposition=self.content_disposition,
- cache_control=self.cache_control,
- content_md5=self.content_md5
- )
- if not self.check_mode:
- try:
- self.blob_client.set_blob_properties(self.container, self.blob, content_settings=content_settings)
- except AzureHttpError as exc:
-                self.fail("Error updating blob content settings {0}:{1} - {2}".format(self.container, self.blob, str(exc)))
-
- self.blob_obj = self.get_blob()
- self.results['changed'] = True
- self.results['actions'].append("updated blob {0}:{1} content settings.".format(self.container, self.blob))
- self.results['container'] = self.container_obj
- self.results['blob'] = self.blob_obj
-
-
-def main():
- AzureRMStorageBlob()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_subnet.py b/lib/ansible/modules/cloud/azure/azure_rm_subnet.py
deleted file mode 100644
index 80058d8d57..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_subnet.py
+++ /dev/null
@@ -1,399 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_subnet
-version_added: "2.1"
-short_description: Manage Azure subnets
-description:
- - Create, update or delete a subnet within a given virtual network.
- - Allows setting and updating the address prefix CIDR, which must be valid within the context of the virtual network.
- - Use the M(azure_rm_networkinterface) module to associate interfaces with the subnet and assign specific IP addresses.
-options:
- resource_group:
- description:
- - Name of resource group.
- required: true
- name:
- description:
- - Name of the subnet.
- required: true
- address_prefix_cidr:
- description:
- - CIDR defining the IPv4 address space of the subnet. Must be valid within the context of the virtual network.
- aliases:
- - address_prefix
- security_group:
- description:
- - Existing security group with which to associate the subnet.
-        - Can be the name of an existing security group in the same resource group.
- - Can be the resource ID of the security group.
- - Can be a dict containing the I(name) and I(resource_group) of the security group.
- aliases:
- - security_group_name
- state:
- description:
- - Assert the state of the subnet. Use C(present) to create or update a subnet and use C(absent) to delete a subnet.
- default: present
- choices:
- - absent
- - present
- virtual_network_name:
- description:
- - Name of an existing virtual network with which the subnet is or will be associated.
- required: true
- aliases:
- - virtual_network
- route_table:
- description:
- - The reference of the RouteTable resource.
- - Can be the name or resource ID of the route table.
- - Can be a dict containing the I(name) and I(resource_group) of the route table.
- version_added: "2.7"
- service_endpoints:
- description:
- - An array of service endpoints.
- type: list
- suboptions:
- service:
- description:
- - The type of the endpoint service.
- required: True
- locations:
- description:
- - A list of locations.
- type: list
- version_added: "2.8"
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Chris Houseknecht (@chouseknecht)
- - Matt Davis (@nitzmahone)
-
-'''
-
-EXAMPLES = '''
- - name: Create a subnet
- azure_rm_subnet:
- resource_group: myResourceGroup
- virtual_network_name: myVirtualNetwork
- name: mySubnet
- address_prefix_cidr: "10.1.0.0/24"
-
- - name: Create a subnet that references an NSG in another resource group
- azure_rm_subnet:
- resource_group: myResourceGroup
- virtual_network_name: myVirtualNetwork
- name: mySubnet
- address_prefix_cidr: "10.1.0.0/16"
- security_group:
- name: secgroupfoo
- resource_group: mySecondResourceGroup
- route_table: route
-
- - name: Delete a subnet
- azure_rm_subnet:
- resource_group: myResourceGroup
- virtual_network_name: myVirtualNetwork
- name: mySubnet
- state: absent
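-
- # An illustrative sketch of the service_endpoints option documented above; the service name
- # and location are placeholder values.
- - name: Create a subnet with a service endpoint
-    azure_rm_subnet:
-      resource_group: myResourceGroup
-      virtual_network_name: myVirtualNetwork
-      name: mySubnet
-      address_prefix_cidr: "10.1.0.0/24"
-      service_endpoints:
-        - service: Microsoft.Sql
-          locations:
-            - eastus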
-'''
-
-RETURN = '''
-state:
- description:
- - Current state of the subnet.
- returned: success
- type: complex
- contains:
- address_prefix:
- description:
- - IP address CIDR.
- returned: always
- type: str
- sample: "10.1.0.0/16"
- id:
- description:
- - Subnet resource path.
- returned: always
- type: str
-            sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVirtualNetwork/subnets/mySubnet"
- name:
- description:
- - Subnet name.
- returned: always
- type: str
- sample: "foobar"
- network_security_group:
- description:
- - Associated network security group of subnets.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Security group resource identifier.
- returned: always
- type: str
-                    sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/secgroupfoo"
- name:
- description:
- - Name of the security group.
- returned: always
- type: str
- sample: "secgroupfoo"
- provisioning_state:
- description:
- - Success or failure of the provisioning event.
- returned: always
- type: str
- sample: "Succeeded"
-''' # NOQA
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, CIDR_PATTERN, azure_id_to_dict, format_resource_id
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def subnet_to_dict(subnet):
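-    # Flatten the SDK Subnet object into the module's result format, resolving the associated
-    # NSG and route table names and resource groups from their resource IDs.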
- result = dict(
- id=subnet.id,
- name=subnet.name,
- provisioning_state=subnet.provisioning_state,
- address_prefix=subnet.address_prefix,
- network_security_group=dict(),
- route_table=dict()
- )
- if subnet.network_security_group:
- id_keys = azure_id_to_dict(subnet.network_security_group.id)
- result['network_security_group']['id'] = subnet.network_security_group.id
- result['network_security_group']['name'] = id_keys['networkSecurityGroups']
- result['network_security_group']['resource_group'] = id_keys['resourceGroups']
- if subnet.route_table:
- id_keys = azure_id_to_dict(subnet.route_table.id)
- result['route_table']['id'] = subnet.route_table.id
- result['route_table']['name'] = id_keys['routeTables']
- result['route_table']['resource_group'] = id_keys['resourceGroups']
- if subnet.service_endpoints:
- result['service_endpoints'] = [{'service': item.service, 'locations': item.locations or []} for item in subnet.service_endpoints]
- return result
-
-
-class AzureRMSubnet(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- virtual_network_name=dict(type='str', required=True, aliases=['virtual_network']),
- address_prefix_cidr=dict(type='str', aliases=['address_prefix']),
- security_group=dict(type='raw', aliases=['security_group_name']),
- route_table=dict(type='raw'),
- service_endpoints=dict(
- type='list'
- )
- )
-
- self.results = dict(
- changed=False,
- state=dict()
- )
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.virtual_network_name = None
- self.address_prefix_cidr = None
- self.security_group = None
- self.route_table = None
- self.service_endpoints = None
-
- super(AzureRMSubnet, self).__init__(self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
-
- nsg = None
- subnet = None
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.address_prefix_cidr and not CIDR_PATTERN.match(self.address_prefix_cidr):
- self.fail("Invalid address_prefix_cidr value {0}".format(self.address_prefix_cidr))
-
- nsg = dict()
- if self.security_group:
- nsg = self.parse_nsg()
-
- route_table = dict()
- if self.route_table:
- route_table = self.parse_resource_to_dict(self.route_table)
- self.route_table = format_resource_id(val=route_table['name'],
- subscription_id=route_table['subscription_id'],
- namespace='Microsoft.Network',
- types='routeTables',
- resource_group=route_table['resource_group'])
-
- results = dict()
- changed = False
-
- try:
- self.log('Fetching subnet {0}'.format(self.name))
- subnet = self.network_client.subnets.get(self.resource_group,
- self.virtual_network_name,
- self.name)
- self.check_provisioning_state(subnet, self.state)
- results = subnet_to_dict(subnet)
-
- if self.state == 'present':
- if self.address_prefix_cidr and results['address_prefix'] != self.address_prefix_cidr:
- self.log("CHANGED: subnet {0} address_prefix_cidr".format(self.name))
- changed = True
- results['address_prefix'] = self.address_prefix_cidr
-
- if self.security_group is not None and results['network_security_group'].get('id') != nsg.get('id'):
- self.log("CHANGED: subnet {0} network security group".format(self.name))
- changed = True
- results['network_security_group']['id'] = nsg.get('id')
- results['network_security_group']['name'] = nsg.get('name')
- if self.route_table is not None and self.route_table != results['route_table'].get('id'):
- changed = True
- results['route_table']['id'] = self.route_table
- self.log("CHANGED: subnet {0} route_table to {1}".format(self.name, route_table.get('name')))
-
- if self.service_endpoints:
- oldd = {}
- for item in self.service_endpoints:
- name = item['service']
- locations = item.get('locations') or []
-                        oldd[name] = {'service': name, 'locations': sorted(locations)}
- newd = {}
- if 'service_endpoints' in results:
- for item in results['service_endpoints']:
- name = item['service']
- locations = item.get('locations') or []
-                            newd[name] = {'service': name, 'locations': sorted(locations)}
- if newd != oldd:
- changed = True
- results['service_endpoints'] = self.service_endpoints
-
- elif self.state == 'absent':
- changed = True
- except CloudError:
- # the subnet does not exist
- if self.state == 'present':
- changed = True
-
- self.results['changed'] = changed
- self.results['state'] = results
-
- if not self.check_mode:
-
- if self.state == 'present' and changed:
- if not subnet:
- # create new subnet
- if not self.address_prefix_cidr:
- self.fail('address_prefix_cidr is not set')
- self.log('Creating subnet {0}'.format(self.name))
- subnet = self.network_models.Subnet(
- address_prefix=self.address_prefix_cidr
- )
- if nsg:
- subnet.network_security_group = self.network_models.NetworkSecurityGroup(id=nsg.get('id'))
- if self.route_table:
- subnet.route_table = self.network_models.RouteTable(id=self.route_table)
- if self.service_endpoints:
- subnet.service_endpoints = self.service_endpoints
- else:
- # update subnet
- self.log('Updating subnet {0}'.format(self.name))
- subnet = self.network_models.Subnet(
- address_prefix=results['address_prefix']
- )
- if results['network_security_group'].get('id') is not None:
- subnet.network_security_group = self.network_models.NetworkSecurityGroup(id=results['network_security_group'].get('id'))
- if results['route_table'].get('id') is not None:
- subnet.route_table = self.network_models.RouteTable(id=results['route_table'].get('id'))
-
- if results.get('service_endpoints') is not None:
- subnet.service_endpoints = results['service_endpoints']
-
- self.results['state'] = self.create_or_update_subnet(subnet)
- elif self.state == 'absent' and changed:
- # delete subnet
- self.delete_subnet()
- # the delete does not actually return anything. if no exception, then we'll assume
- # it worked.
- self.results['state']['status'] = 'Deleted'
-
- return self.results
-
- def create_or_update_subnet(self, subnet):
- try:
- poller = self.network_client.subnets.create_or_update(self.resource_group,
- self.virtual_network_name,
- self.name,
- subnet)
- new_subnet = self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error creating or updating subnet {0} - {1}".format(self.name, str(exc)))
- self.check_provisioning_state(new_subnet)
- return subnet_to_dict(new_subnet)
-
- def delete_subnet(self):
- self.log('Deleting subnet {0}'.format(self.name))
- try:
- poller = self.network_client.subnets.delete(self.resource_group,
- self.virtual_network_name,
- self.name)
- result = self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error deleting subnet {0} - {1}".format(self.name, str(exc)))
-
- return result
-
- def parse_nsg(self):
- nsg = self.security_group
- resource_group = self.resource_group
- if isinstance(self.security_group, dict):
- nsg = self.security_group.get('name')
- resource_group = self.security_group.get('resource_group', self.resource_group)
- id = format_resource_id(val=nsg,
- subscription_id=self.subscription_id,
- namespace='Microsoft.Network',
- types='networkSecurityGroups',
- resource_group=resource_group)
- name = azure_id_to_dict(id).get('name')
- return dict(id=id, name=name)
-
-
-def main():
- AzureRMSubnet()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_subnet_info.py b/lib/ansible/modules/cloud/azure/azure_rm_subnet_info.py
deleted file mode 100644
index 6bf4e83b9f..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_subnet_info.py
+++ /dev/null
@@ -1,246 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_subnet_info
-version_added: "2.8"
-short_description: Get Azure Subnet facts
-description:
- - Get facts of Azure Subnet.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- virtual_network_name:
- description:
- - The name of the virtual network.
- required: True
- name:
- description:
- - The name of the subnet.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get facts of specific subnet
- azure_rm_subnet_info:
- resource_group: myResourceGroup
- virtual_network_name: myVirtualNetwork
- name: mySubnet
-
- - name: List facts for all subnets in virtual network
- azure_rm_subnet_info:
- resource_group: myResourceGroup
- virtual_network_name: myVirtualNetwork
-'''
-
-RETURN = '''
-subnets:
- description:
- - A list of dictionaries containing facts for subnet.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Subnet resource ID.
- returned: always
- type: str
-            sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVirtualNetwork/subnets/mySubnet"
- resource_group:
- description:
- - Name of resource group.
- returned: always
- type: str
- sample: myResourceGroup
- virtual_network_name:
- description:
- - Name of the containing virtual network.
- returned: always
- type: str
- sample: myVirtualNetwork
- name:
- description:
- - Name of the subnet.
- returned: always
- type: str
- sample: mySubnet
- address_prefix_cidr:
- description:
- - CIDR defining the IPv4 address space of the subnet.
- returned: always
- type: str
- sample: "10.1.0.0/16"
- route_table:
- description:
- - Associated route table ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/routeTables/myRouteTable
- security_group:
- description:
- - Associated security group ID.
- returned: always
- type: str
-            sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/myNsg"
- service_endpoints:
- description:
- - List of service endpoints.
- type: list
- returned: when available
- contains:
- service:
- description:
- - The type of the endpoint service.
- returned: always
- type: str
- sample: Microsoft.Sql
- locations:
- description:
- - A list of location names.
- type: list
- returned: always
- sample: [ 'eastus', 'westus' ]
- provisioning_state:
- description:
- - Provisioning state.
- returned: always
- type: str
- sample: Succeeded
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.network import NetworkManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMSubnetInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- virtual_network_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.resource_group = None
- self.virtual_network_name = None
- self.name = None
- super(AzureRMSubnetInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_subnet_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_subnet_facts' module has been renamed to 'azure_rm_subnet_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name is not None:
- self.results['subnets'] = self.get()
- else:
- self.results['subnets'] = self.list()
-
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.network_client.subnets.get(resource_group_name=self.resource_group,
- virtual_network_name=self.virtual_network_name,
- subnet_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
-            self.fail('Could not get facts for Subnet: {0}'.format(str(e)))
-
- if response is not None:
- results.append(self.format_response(response))
-
- return results
-
- def list(self):
- response = None
- results = []
- try:
-            response = self.network_client.subnets.list(resource_group_name=self.resource_group,
-                                                        virtual_network_name=self.virtual_network_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
-            self.fail('Could not list subnets: {0}'.format(str(e)))
-
- if response is not None:
- for item in response:
-                results.append(self.format_response(item))
-
- return results
-
- def format_response(self, item):
- d = item.as_dict()
- d = {
- 'resource_group': self.resource_group,
- 'virtual_network_name': self.parse_resource_to_dict(d.get('id')).get('name'),
- 'name': d.get('name'),
- 'id': d.get('id'),
- 'address_prefix_cidr': d.get('address_prefix'),
- 'route_table': d.get('route_table', {}).get('id'),
- 'security_group': d.get('network_security_group', {}).get('id'),
- 'provisioning_state': d.get('provisioning_state'),
- 'service_endpoints': d.get('service_endpoints')
- }
- return d
-
-
-def main():
- AzureRMSubnetInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerendpoint.py b/lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerendpoint.py
deleted file mode 100644
index 392bc90e3c..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerendpoint.py
+++ /dev/null
@@ -1,374 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Hai Cao, <t-haicao@microsoft.com>, Yunge Zhu <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_trafficmanagerendpoint
-version_added: "2.7"
-short_description: Manage Azure Traffic Manager endpoint
-description:
- - Create, update and delete Azure Traffic Manager endpoint.
-
-options:
- resource_group:
- description:
- - Name of a resource group where the Traffic Manager endpoint exists or will be created.
- type: str
- required: true
- name:
- description:
- - The name of the endpoint.
- type: str
- required: true
- profile_name:
- description:
- - Name of Traffic Manager profile where this endpoints attaches to.
- type: str
- required: true
- type:
- description:
- - The type of the endpoint.
- required: true
- choices:
- - azure_endpoints
- - external_endpoints
- - nested_endpoints
- target_resource_id:
- description:
-            - The Azure Resource URI of the endpoint.
- - Not applicable to endpoints of I(type=external_endpoints).
- type: str
- target:
- description:
- - The fully-qualified DNS name of the endpoint.
- type: str
- enabled:
- description:
- - The status of the endpoint.
- type: bool
- default: true
- weight:
- description:
- - The weight of this endpoint when traffic manager profile has routing_method of C(weighted).
- - Possible values are from 1 to 1000.
- type: int
- priority:
- description:
- - The priority of this endpoint when traffic manager profile has routing_method of I(priority).
- - Possible values are from 1 to 1000, lower values represent higher priority.
- - This is an optional parameter. If specified, it must be specified on all endpoints.
- - No two endpoints can share the same priority value.
- type: int
- location:
- description:
- - Specifies the location of the external or nested endpoints when using the 'Performance' traffic routing method.
- type: str
- min_child_endpoints:
- description:
- - The minimum number of endpoints that must be available in the child profile in order for the parent profile to be considered available.
- - Only applicable to endpoint of I(type=nested_endpoints).
- type: int
- geo_mapping:
- description:
- - The list of countries/regions mapped to this endpoint when traffic manager profile has routing_method of C(geographic).
- type: list
- state:
- description:
- - Assert the state of the Traffic Manager endpoint. Use C(present) to create or update a Traffic Manager endpoint and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Hai Cao (@caohai)
- - Yunge Zhu (@yungezz)
-
-'''
-
-EXAMPLES = '''
- - name: Create an endpoint for a Traffic Manager profile
- azure_rm_trafficmanagerendpoint:
- resource_group: testresourcegroup
- profile_name: myprofilename
- name: testendpoint1
- type: external_endpoints
- location: westus
- priority: 2
- weight: 1
- target: 1.2.3.4
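-
- # An illustrative sketch of removing the endpoint created above; all values reuse that example.
- - name: Delete the endpoint from the traffic manager profile
-    azure_rm_trafficmanagerendpoint:
-      resource_group: testresourcegroup
-      profile_name: myprofilename
-      name: testendpoint1
-      type: external_endpoints
-      state: absent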
-'''
-
-RETURN = '''
-id:
- description:
- - The ID of the traffic manager endpoint.
- returned: when traffic manager endpoint exists
- type: str
- example:
-        "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/trafficManagerProfiles/testProfile/externalEndpoints/testendpoint"
-'''
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, normalize_location_name
-from ansible.module_utils.common.dict_transformations import _snake_to_camel
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.trafficmanager.models import (
- Endpoint, DnsConfig, MonitorConfig
- )
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def traffic_manager_endpoint_to_dict(endpoint):
- return dict(
- id=endpoint.id,
- name=endpoint.name,
- type=endpoint.type,
- target_resource_id=endpoint.target_resource_id,
- target=endpoint.target,
- status=endpoint.endpoint_status,
- weight=endpoint.weight,
- priority=endpoint.priority,
- location=endpoint.endpoint_location,
- monitor_status=endpoint.endpoint_monitor_status,
- min_child_endpoints=endpoint.min_child_endpoints,
- geo_mapping=endpoint.geo_mapping
- )
-
-
-class Actions:
- NoAction, CreateOrUpdate, Delete = range(3)
-
-
-class AzureRMTrafficManagerEndpoint(AzureRMModuleBase):
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- profile_name=dict(
- type='str',
- required=True
- ),
- type=dict(
- type='str',
- choices=['azure_endpoints', 'external_endpoints', 'nested_endpoints'],
- required=True
- ),
- target=dict(type='str'),
- target_resource_id=dict(type='str'),
- enabled=dict(type='bool', default=True),
- weight=dict(type='int'),
- priority=dict(type='int'),
- location=dict(type='str'),
- min_child_endpoints=dict(type='int'),
- geo_mapping=dict(type='list', elements='str'),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- ),
- )
-
- self.resource_group = None
- self.name = None
- self.state = None
-
- self.profile_name = None
- self.type = None
- self.target_resource_id = None
- self.enabled = None
- self.weight = None
- self.priority = None
- self.location = None
- self.min_child_endpoints = None
- self.geo_mapping = None
- self.endpoint_status = 'Enabled'
-
- self.action = Actions.NoAction
-
- self.results = dict(
- changed=False
- )
-
- super(AzureRMTrafficManagerEndpoint, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()):
- setattr(self, key, kwargs[key])
-
- if self.type:
- self.type = _snake_to_camel(self.type)
-
- to_be_updated = False
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- if self.enabled is not None and self.enabled is False:
- self.endpoint_status = 'Disabled'
-
- response = self.get_traffic_manager_endpoint()
-
- if response:
- self.log('Results : {0}'.format(response))
- self.results['id'] = response['id']
- if self.state == 'present':
- # check update
- to_be_updated = self.check_update(response)
- if to_be_updated:
- self.action = Actions.CreateOrUpdate
-
- elif self.state == 'absent':
- # delete
- self.action = Actions.Delete
- else:
- if self.state == 'present':
- self.action = Actions.CreateOrUpdate
- elif self.state == 'absent':
- # delete requested but the endpoint does not exist
- self.fail("Traffic Manager endpoint {0} does not exist.".format(self.name))
-
- if self.action == Actions.CreateOrUpdate:
- self.results['changed'] = True
- if self.check_mode:
- return self.results
-
- response = self.create_update_traffic_manager_endpoint()
- self.results['id'] = response['id']
-
- if self.action == Actions.Delete:
- self.results['changed'] = True
- if self.check_mode:
- return self.results
- response = self.delete_traffic_manager_endpoint()
-
- return self.results
-
- def get_traffic_manager_endpoint(self):
- '''
- Gets the properties of the specified Traffic Manager endpoint
-
- :return: deserialized Traffic Manager endpoint dict
- '''
- self.log("Checking if Traffic Manager endpoint {0} is present".format(self.name))
- try:
- response = self.traffic_manager_management_client.endpoints.get(self.resource_group, self.profile_name, self.type, self.name)
- self.log("Response : {0}".format(response))
- return traffic_manager_endpoint_to_dict(response)
- except CloudError:
- self.log('Did not find the Traffic Manager endpoint.')
- return False
-
- def delete_traffic_manager_endpoint(self):
- '''
- Deletes the specified Traffic Manager endpoint.
- :return: True
- '''
-
- self.log("Deleting the Traffic Manager endpoint {0}".format(self.name))
- try:
- operation_result = self.traffic_manager_management_client.endpoints.delete(self.resource_group, self.profile_name, self.type, self.name)
- return True
- except CloudError as exc:
- request_id = exc.request_id if exc.request_id else ''
- self.fail("Error deleting the Traffic Manager endpoint {0}, request id {1} - {2}".format(self.name, request_id, str(exc)))
- return False
-
- def create_update_traffic_manager_endpoint(self):
- '''
- Creates or updates a Traffic Manager endpoint.
-
- :return: deserialized Traffic Manager endpoint state dictionary
- '''
- self.log("Creating / Updating the Traffic Manager endpoint {0}".format(self.name))
-
- parameters = Endpoint(target_resource_id=self.target_resource_id,
- target=self.target,
- endpoint_status=self.endpoint_status,
- weight=self.weight,
- priority=self.priority,
- endpoint_location=self.location,
- min_child_endpoints=self.min_child_endpoints,
- geo_mapping=self.geo_mapping)
-
- try:
- response = self.traffic_manager_management_client.endpoints.create_or_update(self.resource_group,
- self.profile_name,
- self.type,
- self.name,
- parameters)
- return traffic_manager_endpoint_to_dict(response)
- except CloudError as exc:
- request_id = exc.request_id if exc.request_id else ''
- self.fail("Error creating the Traffic Manager endpoint {0}, request id {1} - {2}".format(self.name, request_id, str(exc)))
-
- def check_update(self, response):
- if self.endpoint_status is not None and response['status'].lower() != self.endpoint_status.lower():
- self.log("Status Diff - Origin {0} / Update {1}".format(response['status'], self.endpoint_status))
- return True
-
- if self.type and response['type'].lower() != "Microsoft.network/TrafficManagerProfiles/{0}".format(self.type).lower():
- self.log("Type Diff - Origin {0} / Update {1}".format(response['type'], self.type))
- return True
-
- if self.target_resource_id and response['target_resource_id'] != self.target_resource_id:
- self.log("target_resource_id Diff - Origin {0} / Update {1}".format(response['target_resource_id'], self.target_resource_id))
- return True
-
- if self.target and response['target'] != self.target:
- self.log("target Diff - Origin {0} / Update {1}".format(response['target'], self.target))
- return True
-
- if self.weight and int(response['weight']) != self.weight:
- self.log("weight Diff - Origin {0} / Update {1}".format(response['weight'], self.weight))
- return True
-
- if self.priority and int(response['priority']) != self.priority:
- self.log("priority Diff - Origin {0} / Update {1}".format(response['priority'], self.priority))
- return True
-
- if self.min_child_endpoints and int(response['min_child_endpoints']) != self.min_child_endpoints:
- self.log("min_child_endpoints Diff - Origin {0} / Update {1}".format(response['min_child_endpoints'], self.min_child_endpoints))
- return True
-
- if self.geo_mapping and response['geo_mapping'] != self.geo_mapping:
- self.log("geo_mapping Diff - Origin {0} / Update {1}".format(response['geo_mapping'], self.geo_mapping))
- return True
-
- return False
-
-
-def main():
- """Main execution"""
- AzureRMTrafficManagerEndpoint()
-
-
-if __name__ == '__main__':
- main()
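The module deleted above boils down to a get -> compare -> create_or_update round trip against the Traffic Manager SDK (get_traffic_manager_endpoint, check_update, create_update_traffic_manager_endpoint). A minimal standalone sketch of that flow, assuming a TrafficManagerManagementClient has already been constructed elsewhere and comparing only a few illustrative fields (ensure_endpoint is a hypothetical helper, not part of the module):

    from msrestazure.azure_exceptions import CloudError
    from azure.mgmt.trafficmanager.models import Endpoint


    def ensure_endpoint(client, resource_group, profile_name, endpoint_type, name,
                        target=None, weight=None, priority=None, endpoint_status='Enabled'):
        """Return (endpoint, changed); create or update only when a desired field differs."""
        desired = Endpoint(target=target, weight=weight, priority=priority,
                           endpoint_status=endpoint_status)
        try:
            current = client.endpoints.get(resource_group, profile_name, endpoint_type, name)
        except CloudError:
            current = None
        if current is not None:
            unchanged = all(
                value is None or getattr(current, attr) == value
                for attr, value in [('target', target), ('weight', weight),
                                    ('priority', priority), ('endpoint_status', endpoint_status)]
            )
            if unchanged:
                return current, False
        updated = client.endpoints.create_or_update(resource_group, profile_name,
                                                    endpoint_type, name, desired)
        return updated, True

The endpoint type passed to the SDK is the camelCase form that _snake_to_camel produces from the module's choices, for example 'externalEndpoints'.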
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerendpoint_info.py b/lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerendpoint_info.py
deleted file mode 100644
index f2a565cd9b..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerendpoint_info.py
+++ /dev/null
@@ -1,311 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Hai Cao, <t-haicao@microsoft.com>, Yunge Zhu <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_trafficmanagerendpoint_info
-
-version_added: "2.9"
-
-short_description: Get Azure Traffic Manager endpoint facts
-
-description:
- - Get facts for a specific Traffic Manager endpoint or all endpoints in a Traffic Manager profile.
-
-options:
- name:
- description:
- - Limit results to a specific Traffic Manager endpoint.
- resource_group:
- description:
- - The resource group to search for the desired Traffic Manager profile.
- required: True
- profile_name:
- description:
- - Name of Traffic Manager Profile.
- required: True
- type:
- description:
- - Type of endpoint.
- choices:
- - azure_endpoints
- - external_endpoints
- - nested_endpoints
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Hai Cao (@caohai)
- - Yunge Zhu (@yungezz)
-'''
-
-EXAMPLES = '''
- - name: Get endpoints facts of a Traffic Manager profile
- azure_rm_trafficmanagerendpoint_info:
- resource_group: myResourceGroup
- profile_name: Testing
-
- - name: Get specific endpoint of a Traffic Manager profile
- azure_rm_trafficmanagerendpoint_info:
- resource_group: myResourceGroup
- profile_name: Testing
- name: test_external_endpoint
-
-'''
-
-RETURN = '''
-endpoints:
- description:
- - List of Traffic Manager endpoints.
- returned: always
- type: complex
- contains:
- resource_group:
- description:
- - Name of a resource group.
- returned: always
- type: str
- sample: myResourceGroup
- name:
- description:
- - Name of the Traffic Manager endpoint.
- returned: always
- type: str
- sample: testendpoint
- type:
- description:
- - The type of the endpoint.
- returned: always
- type: str
- sample: external_endpoints
- target_resource_id:
- description:
- - The Azure Resource URI of the endpoint.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ClassicCompute/domainNames/vscjavaci
- target:
- description:
- - The fully-qualified DNS name of the endpoint.
- returned: always
- type: str
- sample: 8.8.8.8
- enabled:
- description:
- - The status of the endpoint.
- returned: always
- type: str
- sample: Enabled
- weight:
- description:
- - The weight of this endpoint when using the 'Weighted' traffic routing method.
- returned: always
- type: int
- sample: 10
- priority:
- description:
- - The priority of this endpoint when using the 'Priority' traffic routing method.
- returned: always
- type: str
- sample: 3
- location:
- description:
- - The location of the external or nested endpoints when using the 'Performance' traffic routing method.
- returned: always
- type: str
- sample: East US
- min_child_endpoints:
- description:
- - The minimum number of endpoints that must be available in the child profile to make the parent profile available.
- returned: always
- type: int
- sample: 3
- geo_mapping:
- description:
- - The list of countries/regions mapped to this endpoint when using the 'Geographic' traffic routing method.
- returned: always
- type: list
- sample: [
- "GEO-NA",
- "GEO-AS"
- ]
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import (
- _snake_to_camel, _camel_to_snake
-)
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.common import AzureHttpError
-except Exception:
- # handled in azure_rm_common
- pass
-
-import re
-
-AZURE_OBJECT_CLASS = 'TrafficManagerEndpoints'
-
-
-def serialize_endpoint(endpoint, resource_group):
- result = dict(
- id=endpoint.id,
- name=endpoint.name,
- target_resource_id=endpoint.target_resource_id,
- target=endpoint.target,
- enabled=True,
- weight=endpoint.weight,
- priority=endpoint.priority,
- location=endpoint.endpoint_location,
- min_child_endpoints=endpoint.min_child_endpoints,
- geo_mapping=endpoint.geo_mapping,
- monitor_status=endpoint.endpoint_monitor_status,
- resource_group=resource_group
- )
-
- if endpoint.endpoint_status and endpoint.endpoint_status == 'Disabled':
- result['enabled'] = False
-
- if endpoint.type:
- result['type'] = _camel_to_snake(endpoint.type.split("/")[-1])
-
- return result
-
-
-class AzureRMTrafficManagerEndpointInfo(AzureRMModuleBase):
- """Utility class to get Azure Traffic Manager Endpoint facts"""
-
- def __init__(self):
-
- self.module_args = dict(
- profile_name=dict(
- type='str',
- required=True),
- resource_group=dict(
- type='str',
- required=True),
- name=dict(type='str'),
- type=dict(
- type='str',
- choices=[
- 'azure_endpoints',
- 'external_endpoints',
- 'nested_endpoints'
- ])
- )
-
- self.results = dict(
- changed=False,
- endpoints=[]
- )
-
- self.profile_name = None
- self.name = None
- self.resource_group = None
- self.type = None
-
- super(AzureRMTrafficManagerEndpointInfo, self).__init__(
- derived_arg_spec=self.module_args,
- supports_tags=False,
- facts_module=True
- )
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_trafficmanagerendpoint_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_trafficmanagerendpoint_facts' module has been renamed to 'azure_rm_trafficmanagerendpoint_info'",
- version='2.13')
-
- for key in self.module_args:
- setattr(self, key, kwargs[key])
-
- if self.type:
- self.type = _snake_to_camel(self.type)
-
- if self.name and not self.resource_group:
- self.fail("Parameter error: resource group required when filtering by name.")
-
- if self.name:
- self.results['endpoints'] = self.get_item()
- elif self.type:
- self.results['endpoints'] = self.list_by_type()
- else:
- self.results['endpoints'] = self.list_by_profile()
-
- return self.results
-
- def get_item(self):
- """Get a single Azure Traffic Manager endpoint"""
-
- self.log('Get properties for {0}'.format(self.name))
-
- item = None
- result = []
-
- try:
- item = self.traffic_manager_management_client.endpoints.get(
- self.resource_group, self.profile_name, self.type, self.name)
- except CloudError:
- pass
-
- if item:
- # item.type is the full resource type, e.g. 'Microsoft.Network/trafficManagerProfiles/externalEndpoints'
- if self.type is None or (item.type and item.type.split('/')[-1] == self.type):
- result = [serialize_endpoint(item, self.resource_group)]
-
- return result
-
- def list_by_profile(self):
- """Get all Azure Traffic Manager endpoints of a profile"""
-
- self.log('List all endpoints belongs to a Traffic Manager profile')
-
- try:
- response = self.traffic_manager_management_client.profiles.get(self.resource_group, self.profile_name)
- except AzureHttpError as exc:
- self.fail('Failed to list all items - {0}'.format(str(exc)))
-
- results = []
- if response and response.endpoints:
- for endpoint in response.endpoints:
- results.append(serialize_endpoint(endpoint, self.resource_group))
-
- return results
-
- def list_by_type(self):
- """Get all Azure Traffic Managers endpoints of a profile by type"""
- self.log('List all Traffic Manager endpoints of a profile by type')
- try:
- response = self.traffic_manager_management_client.profiles.get(self.resource_group, self.profile_name)
- except AzureHttpError as exc:
- self.fail('Failed to list all items - {0}'.format(str(exc)))
-
- results = []
- # profiles.get() returns a single profile; iterate its endpoints and match on the type's last segment
- if response and response.endpoints:
- for endpoint in response.endpoints:
- if endpoint.type and endpoint.type.split('/')[-1] == self.type:
- results.append(serialize_endpoint(endpoint, self.resource_group))
- return results
-
-
-def main():
- """Main module execution code path"""
-
- AzureRMTrafficManagerEndpointInfo()
-
-
-if __name__ == '__main__':
- main()
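The facts module deleted above never calls a dedicated endpoint list operation; it reads the whole profile with profiles.get() and serializes profile.endpoints. A compact sketch of the same pattern, again assuming a pre-built client and reusing the _camel_to_snake helper the module itself imports (list_profile_endpoints is a hypothetical name):

    from azure.common import AzureHttpError
    from ansible.module_utils.common.dict_transformations import _camel_to_snake


    def list_profile_endpoints(client, resource_group, profile_name, endpoint_type=None):
        """Return the profile's endpoints as plain dicts, optionally filtered by snake_case type."""
        try:
            profile = client.profiles.get(resource_group, profile_name)
        except AzureHttpError as exc:
            raise RuntimeError('Failed to read profile {0}: {1}'.format(profile_name, exc))
        results = []
        for endpoint in profile.endpoints or []:
            # endpoint.type looks like 'Microsoft.Network/trafficManagerProfiles/externalEndpoints';
            # keep only the last segment and convert it, as serialize_endpoint() does above.
            short_type = _camel_to_snake(endpoint.type.split('/')[-1]) if endpoint.type else None
            if endpoint_type and short_type != endpoint_type:
                continue
            results.append(dict(name=endpoint.name, type=short_type, target=endpoint.target,
                                weight=endpoint.weight, priority=endpoint.priority))
        return results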
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile.py b/lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile.py
deleted file mode 100644
index 4a15340c5b..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile.py
+++ /dev/null
@@ -1,464 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Hai Cao, <t-haicao@microsoft.com> Yunge Zhu <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_trafficmanagerprofile
-version_added: "2.7"
-short_description: Manage Azure Traffic Manager profile
-description:
- - Create, update and delete a Traffic Manager profile.
-
-options:
- resource_group:
- description:
- - Name of a resource group where the Traffic Manager profile exists or will be created.
- required: true
- name:
- description:
- - Name of the Traffic Manager profile.
- required: true
- state:
- description:
- - Assert the state of the Traffic Manager profile. Use C(present) to create or update a Traffic Manager profile and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
- location:
- description:
- - Valid Azure location. Defaults to C(global) because, in the default public Azure cloud, a Traffic Manager profile can only be deployed globally.
- - Reference U(https://docs.microsoft.com/en-us/azure/traffic-manager/quickstart-create-traffic-manager-profile#create-a-traffic-manager-profile).
- default: global
- profile_status:
- description:
- - The status of the Traffic Manager profile.
- default: enabled
- choices:
- - enabled
- - disabled
- routing_method:
- description:
- - The traffic routing method of the Traffic Manager profile.
- default: performance
- choices:
- - performance
- - priority
- - weighted
- - geographic
- dns_config:
- description:
- - The DNS settings of the Traffic Manager profile.
- suboptions:
- relative_name:
- description:
- - The relative DNS name provided by this Traffic Manager profile.
- - If not provided, the name of the Traffic Manager profile will be used.
- ttl:
- description:
- - The DNS Time-To-Live (TTL), in seconds.
- type: int
- default: 60
- monitor_config:
- description:
- - The endpoint monitoring settings of the Traffic Manager profile.
- suboptions:
- protocol:
- description:
- - The protocol C(HTTP), C(HTTPS) or C(TCP) used to probe for endpoint health.
- choices:
- - HTTP
- - HTTPS
- - TCP
- port:
- description:
- - The TCP port used to probe for endpoint health.
- path:
- description:
- - The path relative to the endpoint domain name used to probe for endpoint health.
- interval:
- description:
- - The monitor interval for endpoints in this profile in seconds.
- type: int
- timeout:
- description:
- - The monitor timeout for endpoints in this profile in seconds.
- type: int
- tolerated_failures:
- description:
- - The number of consecutive failed health checks tolerated before declaring an endpoint in this profile Degraded after the next failed health check.
- default:
- protocol: HTTP
- port: 80
- path: /
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Hai Cao (@caohai)
- - Yunge Zhu (@yungezz)
-
-'''
-
-EXAMPLES = '''
- - name: Create a Traffic Manager Profile
- azure_rm_trafficmanagerprofile:
- name: tmtest
- resource_group: myResourceGroup
- location: global
- profile_status: enabled
- routing_method: priority
- dns_config:
- relative_name: tmtest
- ttl: 60
- monitor_config:
- protocol: HTTPS
- port: 80
- path: '/'
- tags:
- Environment: Test
-
- - name: Delete a Traffic Manager Profile
- azure_rm_trafficmanagerprofile:
- state: absent
- name: tmtest
- resource_group: myResourceGroup
-'''
-RETURN = '''
-id:
- description:
- - The ID of the traffic manager profile.
- returned: when traffic manager profile exists
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/tmt/providers/Microsoft.Network/trafficManagerProfiles/tmtest"
-endpoints:
- description:
- - List of endpoint IDs attached to the profile.
- returned: when traffic manager endpoints exists
- type: list
- sample: [
- "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/tmt/providers/Microsoft.Network/trafficManagerProfiles/tm049b1ae293/exter
- nalEndpoints/e2",
- "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/tmt/providers/Microsoft.Network/trafficManagerProfiles/tm049b1ae293/exter
- nalEndpoints/e1"
- ]
-'''
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, normalize_location_name
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.trafficmanager.models import (
- Profile, Endpoint, DnsConfig, MonitorConfig
- )
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def shorten_traffic_manager_dict(tmd):
- return dict(
- id=tmd['id'],
- endpoints=[endpoint['id'] for endpoint in tmd['endpoints']] if tmd['endpoints'] else []
- )
-
-
-def traffic_manager_profile_to_dict(tmp):
- result = dict(
- id=tmp.id,
- name=tmp.name,
- type=tmp.type,
- tags=tmp.tags,
- location=tmp.location,
- profile_status=tmp.profile_status,
- routing_method=tmp.traffic_routing_method,
- dns_config=dict(),
- monitor_config=dict(),
- endpoints=[]
- )
- if tmp.dns_config:
- result['dns_config']['relative_name'] = tmp.dns_config.relative_name
- result['dns_config']['fqdn'] = tmp.dns_config.fqdn
- result['dns_config']['ttl'] = tmp.dns_config.ttl
- if tmp.monitor_config:
- result['monitor_config']['profile_monitor_status'] = tmp.monitor_config.profile_monitor_status
- result['monitor_config']['protocol'] = tmp.monitor_config.protocol
- result['monitor_config']['port'] = tmp.monitor_config.port
- result['monitor_config']['path'] = tmp.monitor_config.path
- result['monitor_config']['interval'] = tmp.monitor_config.interval_in_seconds
- result['monitor_config']['timeout'] = tmp.monitor_config.timeout_in_seconds
- result['monitor_config']['tolerated_failures'] = tmp.monitor_config.tolerated_number_of_failures
- if tmp.endpoints:
- for endpoint in tmp.endpoints:
- result['endpoints'].append(dict(
- id=endpoint.id,
- name=endpoint.name,
- type=endpoint.type,
- target_resource_id=endpoint.target_resource_id,
- target=endpoint.target,
- endpoint_status=endpoint.endpoint_status,
- weight=endpoint.weight,
- priority=endpoint.priority,
- endpoint_location=endpoint.endpoint_location,
- endpoint_monitor_status=endpoint.endpoint_monitor_status,
- min_child_endpoints=endpoint.min_child_endpoints,
- geo_mapping=endpoint.geo_mapping
- ))
- return result
-
-
-def create_dns_config_instance(dns_config):
- return DnsConfig(
- relative_name=dns_config['relative_name'],
- ttl=dns_config['ttl']
- )
-
-
-def create_monitor_config_instance(monitor_config):
- return MonitorConfig(
- profile_monitor_status=monitor_config['profile_monitor_status'],
- protocol=monitor_config['protocol'],
- port=monitor_config['port'],
- path=monitor_config['path'],
- interval_in_seconds=monitor_config['interval'],
- timeout_in_seconds=monitor_config['timeout'],
- tolerated_number_of_failures=monitor_config['tolerated_failures']
- )
-
-
-dns_config_spec = dict(
- relative_name=dict(type='str'),
- ttl=dict(type='int')
-)
-
-monitor_config_spec = dict(
- profile_monitor_status=dict(type='str'),
- protocol=dict(type='str'),
- port=dict(type='int'),
- path=dict(type='str'),
- interval=dict(type='int'),
- timeout=dict(type='int'),
- tolerated_failures=dict(type='int')
-)
-
-
-class AzureRMTrafficManagerProfile(AzureRMModuleBase):
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- ),
- location=dict(
- type='str',
- default='global'
- ),
- profile_status=dict(
- type='str',
- default='enabled',
- choices=['enabled', 'disabled']
- ),
- routing_method=dict(
- type='str',
- default='performance',
- choices=['performance', 'priority', 'weighted', 'geographic']
- ),
- dns_config=dict(
- type='dict',
- options=dns_config_spec
- ),
- monitor_config=dict(
- type='dict',
- default=dict(
- protocol='HTTP',
- port=80,
- path='/'
- ),
- options=monitor_config_spec
- ),
- )
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.tags = None
- self.location = None
- self.profile_status = None
- self.routing_method = None
- self.dns_config = None
- self.monitor_config = None
- self.endpoints_copy = None
-
- self.results = dict(
- changed=False
- )
-
- super(AzureRMTrafficManagerProfile, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- to_be_updated = False
-
- if not self.dns_config:
- self.dns_config = dict(
- relative_name=self.name,
- ttl=60
- )
-
- if not self.location:
- self.location = 'global'
-
- response = self.get_traffic_manager_profile()
-
- if self.state == 'present':
- if not response:
- to_be_updated = True
- else:
- self.results = shorten_traffic_manager_dict(response)
- self.log('Results : {0}'.format(response))
- update_tags, response['tags'] = self.update_tags(response['tags'])
-
- if update_tags:
- to_be_updated = True
-
- to_be_updated = to_be_updated or self.check_update(response)
-
- if to_be_updated:
- self.log("Need to Create / Update the Traffic Manager profile")
-
- if not self.check_mode:
- self.results = shorten_traffic_manager_dict(self.create_update_traffic_manager_profile())
- self.log("Creation / Update done.")
-
- self.results['changed'] = True
- return self.results
-
- elif self.state == 'absent' and response:
- self.log("Need to delete the Traffic Manager profile")
- self.results = shorten_traffic_manager_dict(response)
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_traffic_manager_profile()
-
- self.log("Traffic Manager profile deleted")
-
- return self.results
-
- def get_traffic_manager_profile(self):
- '''
- Gets the properties of the specified Traffic Manager profile
-
- :return: deserialized Traffic Manager profile dict
- '''
- self.log("Checking if Traffic Manager profile {0} is present".format(self.name))
- try:
- response = self.traffic_manager_management_client.profiles.get(self.resource_group, self.name)
- self.log("Response : {0}".format(response))
- self.log("Traffic Manager profile : {0} found".format(response.name))
- self.endpoints_copy = response.endpoints if response and response.endpoints else None
- return traffic_manager_profile_to_dict(response)
- except CloudError:
- self.log('Did not find the Traffic Manager profile.')
- return False
-
- def delete_traffic_manager_profile(self):
- '''
- Deletes the specified Traffic Manager profile in the specified subscription and resource group.
- :return: True
- '''
-
- self.log("Deleting the Traffic Manager profile {0}".format(self.name))
- try:
- operation_result = self.traffic_manager_management_client.profiles.delete(self.resource_group, self.name)
- return True
- except CloudError as e:
- self.log('Error attempting to delete the Traffic Manager profile.')
- self.fail("Error deleting the Traffic Manager profile: {0}".format(e.message))
- return False
-
- def create_update_traffic_manager_profile(self):
- '''
- Creates or updates a Traffic Manager profile.
-
- :return: deserialized Traffic Manager profile state dictionary
- '''
- self.log("Creating / Updating the Traffic Manager profile {0}".format(self.name))
-
- parameters = Profile(
- tags=self.tags,
- location=self.location,
- profile_status=self.profile_status,
- traffic_routing_method=self.routing_method,
- dns_config=create_dns_config_instance(self.dns_config) if self.dns_config else None,
- monitor_config=create_monitor_config_instance(self.monitor_config) if self.monitor_config else None,
- endpoints=self.endpoints_copy
- )
- try:
- response = self.traffic_manager_management_client.profiles.create_or_update(self.resource_group, self.name, parameters)
- return traffic_manager_profile_to_dict(response)
- except CloudError as exc:
- self.log('Error attempting to create the Traffic Manager.')
- self.fail("Error creating the Traffic Manager: {0}".format(exc.message))
-
- def check_update(self, response):
- if self.location and normalize_location_name(response['location']) != normalize_location_name(self.location):
- self.log("Location Diff - Origin {0} / Update {1}".format(response['location'], self.location))
- return True
-
- if self.profile_status and response['profile_status'].lower() != self.profile_status:
- self.log("Profile Status Diff - Origin {0} / Update {1}".format(response['profile_status'], self.profile_status))
- return True
-
- if self.routing_method and response['routing_method'].lower() != self.routing_method:
- self.log("Traffic Routing Method Diff - Origin {0} / Update {1}".format(response['routing_method'], self.routing_method))
- return True
-
- if self.dns_config and \
- (response['dns_config']['relative_name'] != self.dns_config['relative_name'] or response['dns_config']['ttl'] != self.dns_config['ttl']):
- self.log("DNS Config Diff - Origin {0} / Update {1}".format(response['dns_config'], self.dns_config))
- return True
-
- for k, v in self.monitor_config.items():
- if v:
- if str(v).lower() != str(response['monitor_config'][k]).lower():
- self.log("Monitor Config Diff - Origin {0} / Update {1}".format(response['monitor_config'], self.monitor_config))
- return True
- return False
-
-
-def main():
- """Main execution"""
- AzureRMTrafficManagerProfile()
-
-
-if __name__ == '__main__':
- main()
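Most of the create path in the module deleted above is about assembling the nested Profile payload before a single profiles.create_or_update() call. A minimal sketch of that assembly using the defaults the argument spec declares (global location, relative DNS name falling back to the profile name, TTL 60, HTTP probe on port 80 at '/'); the client is assumed to be built elsewhere, and build_profile/ensure_profile are hypothetical names:

    from azure.mgmt.trafficmanager.models import Profile, DnsConfig, MonitorConfig


    def build_profile(name, routing_method='performance', relative_name=None, ttl=60,
                      monitor_protocol='HTTP', monitor_port=80, monitor_path='/', tags=None):
        """Assemble the Profile payload the way create_update_traffic_manager_profile() does."""
        return Profile(
            location='global',            # Traffic Manager profiles are global resources
            profile_status='enabled',
            traffic_routing_method=routing_method,
            dns_config=DnsConfig(relative_name=relative_name or name, ttl=ttl),
            monitor_config=MonitorConfig(protocol=monitor_protocol, port=monitor_port,
                                         path=monitor_path),
            tags=tags,
        )


    def ensure_profile(client, resource_group, name, **kwargs):
        """Create or update the profile and return the resulting model."""
        return client.profiles.create_or_update(resource_group, name, build_profile(name, **kwargs))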
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile_info.py b/lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile_info.py
deleted file mode 100644
index 2b6ed59829..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile_info.py
+++ /dev/null
@@ -1,422 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Hai Cao, <t-haicao@microsoft.com>, Yunge Zhu <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_trafficmanagerprofile_info
-
-version_added: "2.9"
-
-short_description: Get Azure Traffic Manager profile facts
-
-description:
- - Get facts for a specific Azure Traffic Manager profile or all Traffic Manager profiles.
-
-options:
- name:
- description:
- - Limit results to a specific Traffic Manager profile.
- resource_group:
- description:
- - The resource group to search for the desired Traffic Manager profile.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Hai Cao (@caohai)
- - Yunge Zhu (@yungezz)
-'''
-
-EXAMPLES = '''
- - name: Get facts for one Traffic Manager profile
- azure_rm_trafficmanagerprofile_info:
- name: Testing
- resource_group: myResourceGroup
-
- - name: Get facts for all Traffic Manager profiles
- azure_rm_trafficmanagerprofile_info:
-
- - name: Get facts by tags
- azure_rm_trafficmanagerprofile_info:
- tags:
- - Environment:Test
-'''
-
-RETURN = '''
-tms:
- description:
- - List of Traffic Manager profiles.
- returned: always
- type: complex
- contains:
- resource_group:
- description:
- - Name of a resource group where the Traffic Manager profile exists.
- returned: always
- type: str
- sample: testGroup
- name:
- description:
- - Name of the Traffic Manager profile.
- returned: always
- type: str
- sample: testTm
- state:
- description:
- - The state of the Traffic Manager profile.
- returned: always
- type: str
- sample: present
- location:
- description:
- - Location of the Traffic Manager profile.
- returned: always
- type: str
- sample: global
- profile_status:
- description:
- - The status of the Traffic Manager profile.
- returned: always
- type: str
- sample: Enabled
- routing_method:
- description:
- - The traffic routing method of the Traffic Manager profile.
- returned: always
- type: str
- sample: performance
- dns_config:
- description:
- - The DNS settings of the Traffic Manager profile.
- returned: always
- type: complex
- contains:
- relative_name:
- description:
- - The relative DNS name provided by the Traffic Manager profile.
- returned: always
- type: str
- sample: testTm
- fqdn:
- description:
- - The fully qualified domain name (FQDN) of the Traffic Manager profile.
- returned: always
- type: str
- sample: testTm.trafficmanager.net
- ttl:
- description:
- - The DNS Time-To-Live (TTL), in seconds.
- returned: always
- type: int
- sample: 60
- monitor_config:
- description:
- - The endpoint monitoring settings of the Traffic Manager profile.
- returned: always
- type: complex
- contains:
- protocol:
- description:
- - The protocol C(HTTP), C(HTTPS) or C(TCP) used to probe for endpoint health.
- returned: always
- type: str
- sample: HTTP
- port:
- description:
- - The TCP port used to probe for endpoint health.
- returned: always
- type: int
- sample: 80
- path:
- description:
- - The path relative to the endpoint domain name used to probe for endpoint health.
- returned: always
- type: str
- sample: /
- interval:
- description:
- - The monitor interval for endpoints in this profile in seconds.
- returned: always
- type: int
- sample: 10
- timeout:
- description:
- - The monitor timeout for endpoints in this profile in seconds.
- returned: always
- type: int
- sample: 30
- tolerated_failures:
- description:
- - The number of consecutive failed health checks tolerated before declaring an endpoint Degraded after the next failed health check.
- returned: always
- type: int
- sample: 3
- endpoints:
- description:
- - The list of endpoints in the Traffic Manager profile.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Fully qualified resource ID for the resource.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/trafficMan
- agerProfiles/tmtest/externalEndpoints/e1"
- name:
- description:
- - The name of the endpoint.
- returned: always
- type: str
- sample: e1
- type:
- description:
- - The type of the endpoint.
- returned: always
- type: str
- sample: external_endpoints
- target_resource_id:
- description:
- - The Azure Resource URI of the of the endpoint.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ClassicCompute/dom
- ainNames/vscjavaci"
- target:
- description:
- - The fully-qualified DNS name of the endpoint.
- returned: always
- type: str
- sample: 8.8.8.8
- status:
- description:
- - The status of the endpoint.
- returned: always
- type: str
- sample: Enabled
- weight:
- description:
- - The weight of this endpoint when the profile has I(routing_method=weighted).
- returned: always
- type: int
- sample: 10
- priority:
- description:
- - The priority of this endpoint when the profile has I(routing_method=priority).
- returned: always
- type: str
- sample: 3
- location:
- description:
- - The location of endpoints when I(type=external_endpoints) or I(type=nested_endpoints), and profile I(routing_method=performance).
- returned: always
- type: str
- sample: East US
- min_child_endpoints:
- description:
- - The minimum number of endpoints that must be available in the child profile to make the parent profile available.
- returned: always
- type: int
- sample: 3
- geo_mapping:
- description:
- - The list of countries/regions mapped to this endpoint when the profile has routing_method C(geographic).
- returned: always
- type: list
- sample: [
- "GEO-NA",
- "GEO-AS"
- ]
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.common.dict_transformations import _camel_to_snake
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.common import AzureHttpError
-except Exception:
- # handled in azure_rm_common
- pass
-
-import re
-
-AZURE_OBJECT_CLASS = 'trafficManagerProfiles'
-
-
-def serialize_endpoint(endpoint):
- result = dict(
- id=endpoint.id,
- name=endpoint.name,
- target_resource_id=endpoint.target_resource_id,
- target=endpoint.target,
- status=endpoint.endpoint_status,
- weight=endpoint.weight,
- priority=endpoint.priority,
- location=endpoint.endpoint_location,
- min_child_endpoints=endpoint.min_child_endpoints,
- geo_mapping=endpoint.geo_mapping,
- )
-
- if endpoint.type:
- result['type'] = _camel_to_snake(endpoint.type.split("/")[-1])
-
- return result
-
-
-class AzureRMTrafficManagerProfileInfo(AzureRMModuleBase):
- """Utility class to get Azure Traffic Manager profile facts"""
-
- def __init__(self):
-
- self.module_args = dict(
- name=dict(type='str'),
- resource_group=dict(type='str'),
- tags=dict(type='list')
- )
-
- self.results = dict(
- changed=False,
- tms=[]
- )
-
- self.name = None
- self.resource_group = None
- self.tags = None
-
- super(AzureRMTrafficManagerProfileInfo, self).__init__(
- derived_arg_spec=self.module_args,
- supports_tags=False,
- facts_module=True
- )
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_trafficmanagerprofile_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_trafficmanagerprofile_facts' module has been renamed to 'azure_rm_trafficmanagerprofile_info'", version='2.13')
-
- for key in self.module_args:
- setattr(self, key, kwargs[key])
-
- if self.name and not self.resource_group:
- self.fail("Parameter error: resource group required when filtering by name.")
-
- if self.name:
- self.results['tms'] = self.get_item()
- elif self.resource_group:
- self.results['tms'] = self.list_resource_group()
- else:
- self.results['tms'] = self.list_all()
-
- return self.results
-
- def get_item(self):
- """Get a single Azure Traffic Manager profile"""
-
- self.log('Get properties for {0}'.format(self.name))
-
- item = None
- result = []
-
- try:
- item = self.traffic_manager_management_client.profiles.get(
- self.resource_group, self.name)
- except CloudError:
- pass
-
- if item and self.has_tags(item.tags, self.tags):
- result = [self.serialize_tm(item)]
-
- return result
-
- def list_resource_group(self):
- """Get all Azure Traffic Managers profiles within a resource group"""
-
- self.log('List all Azure Traffic Managers within a resource group')
-
- try:
- response = self.traffic_manager_management_client.profiles.list_by_resource_group(
- self.resource_group)
- except AzureHttpError as exc:
- self.fail('Failed to list all items - {0}'.format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.serialize_tm(item))
-
- return results
-
- def list_all(self):
- """Get all Azure Traffic Manager profiles within a subscription"""
- self.log('List all Traffic Manager profiles within a subscription')
- try:
- response = self.traffic_manager_management_client.profiles.list_by_subscription()
- except Exception as exc:
- self.fail("Error listing all items - {0}".format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.serialize_tm(item))
- return results
-
- def serialize_tm(self, tm):
- '''
- Convert a Traffic Manager profile object to dict.
- :param tm: Traffic Manager profile object
- :return: dict
- '''
- result = self.serialize_obj(tm, AZURE_OBJECT_CLASS)
-
- new_result = {}
- new_result['id'] = tm.id
- new_result['resource_group'] = re.sub('\\/.*', '', re.sub('.*resourceGroups\\/', '', result['id']))
- new_result['name'] = tm.name
- new_result['state'] = 'present'
- new_result['location'] = tm.location
- new_result['profile_status'] = tm.profile_status
- new_result['routing_method'] = tm.traffic_routing_method.lower()
- new_result['dns_config'] = dict(
- relative_name=tm.dns_config.relative_name,
- fqdn=tm.dns_config.fqdn,
- ttl=tm.dns_config.ttl
- )
- new_result['monitor_config'] = dict(
- profile_monitor_status=tm.monitor_config.profile_monitor_status,
- protocol=tm.monitor_config.protocol,
- port=tm.monitor_config.port,
- path=tm.monitor_config.path,
- interval=tm.monitor_config.interval_in_seconds,
- timeout=tm.monitor_config.timeout_in_seconds,
- tolerated_failures=tm.monitor_config.tolerated_number_of_failures
- )
- new_result['endpoints'] = [serialize_endpoint(endpoint) for endpoint in tm.endpoints]
- new_result['tags'] = tm.tags
- return new_result
-
-
-def main():
- """Main module execution code path"""
-
- AzureRMTrafficManagerProfileInfo()
-
-
-if __name__ == '__main__':
- main()
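The tag filter used above (has_tags) comes from AzureRMModuleBase; outside Ansible the same 'key' or 'key:value' filter can be expressed in a few lines. A sketch of the resource-group / subscription listing with that filter, assuming a pre-built client (list_profiles is a hypothetical name):

    def list_profiles(client, resource_group=None, required_tags=None):
        """List Traffic Manager profiles, optionally scoped to a resource group and filtered by tags."""
        if resource_group:
            profiles = client.profiles.list_by_resource_group(resource_group)
        else:
            profiles = client.profiles.list_by_subscription()

        def matches(tags):
            # required_tags mirrors the module's format: 'key' or 'key:value'
            tags = tags or {}
            for item in required_tags or []:
                key, _, value = item.partition(':')
                if key not in tags or (value and tags[key] != value):
                    return False
            return True

        return [profile for profile in profiles if matches(profile.tags)]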
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py
deleted file mode 100644
index 449924c043..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py
+++ /dev/null
@@ -1,2203 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-# Copyright (c) 2018 James E. King, III (@jeking3) <jking@apache.org>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_virtualmachine
-
-version_added: "2.1"
-
-short_description: Manage Azure virtual machines
-
-description:
- - Manage and configure virtual machines (VMs) and associated resources on Azure.
- - Requires a resource group containing at least one virtual network with at least one subnet.
- - Supports images from the Azure Marketplace, which can be discovered with M(azure_rm_virtualmachineimage_facts).
- - Supports custom images since Ansible 2.5.
- - To use I(custom_data) on a Linux image, the image must have cloud-init enabled. If cloud-init is not enabled, I(custom_data) is ignored.
-
-options:
- resource_group:
- description:
- - Name of the resource group containing the VM.
- required: true
- name:
- description:
- - Name of the VM.
- required: true
- custom_data:
- description:
- - Data made available to the VM and used by C(cloud-init).
- - Only used on Linux images with C(cloud-init) enabled.
- - Consult U(https://docs.microsoft.com/en-us/azure/virtual-machines/linux/using-cloud-init#cloud-init-overview) for cloud-init ready images.
- - To enable cloud-init on a Linux image, follow U(https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cloudinit-prepare-custom-image).
- version_added: "2.5"
- state:
- description:
- - State of the VM.
- - Set to C(present) to create a VM with the configuration specified by other options, or to update the configuration of an existing VM.
- - Set to C(absent) to remove a VM.
- - Does not affect power state. Use I(started)/I(allocated)/I(restarted) parameters to change the power state of a VM.
- default: present
- choices:
- - absent
- - present
- started:
- description:
- - Whether the VM is started or stopped.
- - Set to C(true) with I(state=present) to start the VM.
- - Set to C(false) to stop the VM.
- default: true
- type: bool
- allocated:
- description:
- - Whether the VM is allocated or deallocated, only useful with I(state=present).
- default: True
- type: bool
- generalized:
- description:
- - Whether the VM is generalized or not.
- - Set to C(true) with I(state=present) to generalize the VM.
- - Generalizing a VM is irreversible.
- type: bool
- version_added: "2.8"
- restarted:
- description:
- - Set to C(true) with I(state=present) to restart a running VM.
- type: bool
- location:
- description:
- - Valid Azure location for the VM. Defaults to location of the resource group.
- short_hostname:
- description:
- - Name assigned internally to the host. On a Linux VM this is the name returned by the C(hostname) command.
- - When creating a VM, short_hostname defaults to I(name).
- vm_size:
- description:
- - A valid Azure VM size value. For example, C(Standard_D4).
- - Choices vary depending on the subscription and location. Check your subscription for available choices.
- - Required when creating a VM.
- admin_username:
- description:
- - Admin username used to access the VM after it is created.
- - Required when creating a VM.
- admin_password:
- description:
- - Password for the admin username.
- - Not required when I(os_type=Linux) and SSH password authentication is disabled by setting I(ssh_password_enabled=false).
- ssh_password_enabled:
- description:
- - Whether to enable or disable SSH passwords.
- - When I(os_type=Linux), set to C(false) to disable SSH password authentication and require use of SSH keys.
- default: true
- type: bool
- ssh_public_keys:
- description:
- - For I(os_type=Linux) provide a list of SSH keys.
- - Accepts a list of dicts where each dictionary contains two keys, I(path) and I(key_data).
- - Set I(path) to the default location of the authorized_keys files. For example, I(path=/home/<admin username>/.ssh/authorized_keys).
- - Set I(key_data) to the actual value of the public key.
- image:
- description:
- - The image used to build the VM.
- - For custom images, the name of the image. To narrow the search to a specific resource group, a dict with the keys I(name) and I(resource_group).
- - For Marketplace images, a dict with the keys I(publisher), I(offer), I(sku), and I(version).
- - Set I(version=latest) to get the most recent version of a given image.
- required: true
- availability_set:
- description:
- - Name or ID of an existing availability set to add the VM to. The I(availability_set) should be in the same resource group as VM.
- version_added: "2.5"
- storage_account_name:
- description:
- - Name of a storage account that supports creation of VHD blobs.
- - If not specified for a new VM, a new storage account named <vm name>01 will be created using storage type C(Standard_LRS).
- aliases:
- - storage_account
- storage_container_name:
- description:
- - Name of the container to use within the storage account to store VHD blobs.
- - If not specified, a default container will be created.
- default: vhds
- aliases:
- - storage_container
- storage_blob_name:
- description:
- - Name of the storage blob used to hold the OS disk image of the VM.
- - Must end with '.vhd'.
- - If not specified, defaults to the VM name + '.vhd'.
- aliases:
- - storage_blob
- managed_disk_type:
- description:
- - Managed OS disk type.
- - Create OS disk with managed disk if defined.
- - If not defined, the OS disk will be created with virtual hard disk (VHD).
- choices:
- - Standard_LRS
- - StandardSSD_LRS
- - Premium_LRS
- version_added: "2.4"
- os_disk_name:
- description:
- - OS disk name.
- version_added: "2.8"
- os_disk_caching:
- description:
- - Type of OS disk caching.
- choices:
- - ReadOnly
- - ReadWrite
- aliases:
- - disk_caching
- os_disk_size_gb:
- description:
- - OS disk size in GB.
- version_added: "2.7"
- os_type:
- description:
- - Base type of operating system.
- choices:
- - Windows
- - Linux
- default: Linux
- data_disks:
- description:
- - Describes list of data disks.
- - Use M(azure_rm_manageddisk) to manage a specific disk.
- version_added: "2.4"
- suboptions:
- lun:
- description:
- - The logical unit number for data disk.
- - This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM.
- required: true
- version_added: "2.4"
- disk_size_gb:
- description:
- - The initial disk size in GB for blank data disks.
- - This value cannot be larger than C(1023) GB.
- - Size can be changed only when the virtual machine is deallocated.
- - Not used when I(managed_disk_id) is defined.
- version_added: "2.4"
- managed_disk_type:
- description:
- - Managed data disk type.
- - Only used when OS disk created with managed disk.
- choices:
- - Standard_LRS
- - StandardSSD_LRS
- - Premium_LRS
- version_added: "2.4"
- storage_account_name:
- description:
- - Name of an existing storage account that supports creation of VHD blobs.
- - If not specified for a new VM, a new storage account whose name starts with the VM I(name) will be created using storage type C(Standard_LRS).
- - Only used when OS disk created with virtual hard disk (VHD).
- - Used when I(managed_disk_type) not defined.
- - Cannot be updated unless I(lun) updated.
- version_added: "2.4"
- storage_container_name:
- description:
- - Name of the container to use within the storage account to store VHD blobs.
- - If no name is specified, a default container named 'vhds' will be created.
- - Only used when OS disk created with virtual hard disk (VHD).
- - Used when I(managed_disk_type) not defined.
- - Cannot be updated unless I(lun) updated.
- default: vhds
- version_added: "2.4"
- storage_blob_name:
- description:
- - Name of the storage blob used to hold the data disk image of the VM.
- - Must end with '.vhd'.
- - Defaults to I(name) + timestamp + I(lun) + '.vhd'.
- - Only used when OS disk created with virtual hard disk (VHD).
- - Used when I(managed_disk_type) not defined.
- - Cannot be updated unless I(lun) updated.
- version_added: "2.4"
- caching:
- description:
- - Type of data disk caching.
- choices:
- - ReadOnly
- - ReadWrite
- default: ReadOnly
- version_added: "2.4"
- public_ip_allocation_method:
- description:
- - Allocation method for the public IP of the VM.
- - Used only if a network interface is not specified.
- - When set to C(Dynamic), the public IP address may change any time the VM is rebooted or power cycled.
- - The C(Disabled) choice was added in Ansible 2.6.
- choices:
- - Dynamic
- - Static
- - Disabled
- default: Static
- aliases:
- - public_ip_allocation
- open_ports:
- description:
- - List of ports to open in the security group for the VM, when a security group and network interface are created with a VM.
- - For Linux hosts, defaults to allowing inbound TCP connections to port 22.
- - For Windows hosts, defaults to opening ports 3389 and 5986.
- network_interface_names:
- description:
- - Network interface names to add to the VM.
- - Can be a string of name or resource ID of the network interface.
- - Can be a dict containing I(resource_group) and I(name) of the network interface.
- - If a network interface name is not provided when the VM is created, a default network interface will be created.
- - To create a new network interface, at least one Virtual Network with one Subnet must exist.
- type: list
- aliases:
- - network_interfaces
- virtual_network_resource_group:
- description:
- - The resource group to use when creating a VM with another resource group's virtual network.
- version_added: "2.4"
- virtual_network_name:
- description:
- - The virtual network to use when creating a VM.
- - If not specified, a new network interface will be created and assigned to the first virtual network found in the resource group.
- - Use with I(virtual_network_resource_group) to place the virtual network in another resource group.
- aliases:
- - virtual_network
- subnet_name:
- description:
- - Subnet for the VM.
- - Defaults to the first subnet found in the virtual network or the subnet of the I(network_interface_name), if provided.
- - If the subnet is in another resource group, specify the resource group with I(virtual_network_resource_group).
- aliases:
- - subnet
- remove_on_absent:
- description:
- - Associated resources to remove when removing a VM using I(state=absent).
- - To remove all resources related to the VM being removed, including auto-created resources, set to C(all).
- - To remove only resources that were automatically created while provisioning the VM being removed, set to C(all_autocreated).
- - To remove only specific resources, set to C(network_interfaces), C(virtual_storage) or C(public_ips).
- - Any other input will be ignored.
- type: list
- default: ['all']
- plan:
- description:
- - Third-party billing plan for the VM.
- version_added: "2.5"
- type: dict
- suboptions:
- name:
- description:
- - Billing plan name.
- required: true
- product:
- description:
- - Product name.
- required: true
- publisher:
- description:
- - Publisher offering the plan.
- required: true
- promotion_code:
- description:
- - Optional promotion code.
- accept_terms:
- description:
- - Accept terms for Marketplace images that require it.
- - Only Azure service admin/account admin users can purchase images from the Marketplace.
- - Only valid when a I(plan) is specified.
- type: bool
- default: false
- version_added: "2.7"
- zones:
- description:
- - A list of Availability Zones for your VM.
- type: list
- version_added: "2.8"
- license_type:
- description:
- - On-premise license for the image or disk.
- - Only used for images that contain the Windows Server operating system.
- - To remove all license type settings, set to the string C(None).
- version_added: "2.8"
- choices:
- - Windows_Server
- - Windows_Client
- vm_identity:
- description:
- - Identity for the VM.
- version_added: "2.8"
- choices:
- - SystemAssigned
- winrm:
- description:
- - List of Windows Remote Management configurations of the VM.
- version_added: "2.8"
- suboptions:
- protocol:
- description:
- - The protocol of the winrm listener.
- required: true
- choices:
- - http
- - https
- source_vault:
- description:
- - The relative URL of the Key Vault containing the certificate.
- certificate_url:
- description:
- - The URL of a certificate that has been uploaded to Key Vault as a secret.
- certificate_store:
- description:
- - The certificate store on the VM to which the certificate should be added.
- - The specified certificate store is implicitly in the LocalMachine account.
- boot_diagnostics:
- description:
- - Manage boot diagnostics settings for a VM.
- - Boot diagnostics includes a serial console and remote console screenshots.
- version_added: '2.9'
- suboptions:
- enabled:
- description:
- - Flag indicating if boot diagnostics are enabled.
- required: true
- type: bool
- storage_account:
- description:
- - The name of an existing storage account to use for boot diagnostics.
- - If not specified, uses I(storage_account_name) defined one level up.
- - If storage account is not specified anywhere, and C(enabled) is C(true), a default storage account is created for boot diagnostics data.
- required: false
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Chris Houseknecht (@chouseknecht)
- - Matt Davis (@nitzmahone)
- - Christopher Perrin (@cperrin88)
- - James E. King III (@jeking3)
-'''
-EXAMPLES = '''
-
-- name: Create VM with defaults
- azure_rm_virtualmachine:
- resource_group: myResourceGroup
- name: testvm10
- admin_username: chouseknecht
- admin_password: <your password here>
- image:
- offer: CentOS
- publisher: OpenLogic
- sku: '7.1'
- version: latest
-
-- name: Create an availability set for managed disk vm
- azure_rm_availabilityset:
- name: avs-managed-disk
- resource_group: myResourceGroup
- platform_update_domain_count: 5
- platform_fault_domain_count: 2
- sku: Aligned
-
-- name: Create a VM with managed disk
- azure_rm_virtualmachine:
- resource_group: myResourceGroup
- name: vm-managed-disk
- admin_username: adminUser
- availability_set: avs-managed-disk
- managed_disk_type: Standard_LRS
- image:
- offer: CoreOS
- publisher: CoreOS
- sku: Stable
- version: latest
- vm_size: Standard_D4
-
-- name: Create a VM with existing storage account and NIC
- azure_rm_virtualmachine:
- resource_group: myResourceGroup
- name: testvm002
- vm_size: Standard_D4
- storage_account: testaccount001
- admin_username: adminUser
- ssh_public_keys:
- - path: /home/adminUser/.ssh/authorized_keys
- key_data: < insert your ssh public key here... >
- network_interfaces: testvm001
- image:
- offer: CentOS
- publisher: OpenLogic
- sku: '7.1'
- version: latest
-
-- name: Create a VM with OS and multiple data managed disks
- azure_rm_virtualmachine:
- resource_group: myResourceGroup
- name: testvm001
- vm_size: Standard_D4
- managed_disk_type: Standard_LRS
- admin_username: adminUser
- ssh_public_keys:
- - path: /home/adminUser/.ssh/authorized_keys
- key_data: < insert your ssh public key here... >
- image:
- offer: CoreOS
- publisher: CoreOS
- sku: Stable
- version: latest
- data_disks:
- - lun: 0
- managed_disk_id: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/disks/myDisk"
- - lun: 1
- disk_size_gb: 128
- managed_disk_type: Premium_LRS
-
-- name: Create a VM with OS and multiple data storage accounts
- azure_rm_virtualmachine:
- resource_group: myResourceGroup
- name: testvm001
- vm_size: Standard_DS1_v2
- admin_username: adminUser
- ssh_password_enabled: false
- ssh_public_keys:
- - path: /home/adminUser/.ssh/authorized_keys
- key_data: < insert your ssh public key here... >
- network_interfaces: testvm001
- storage_container: osdisk
- storage_blob: osdisk.vhd
- boot_diagnostics:
- enabled: yes
- image:
- offer: CoreOS
- publisher: CoreOS
- sku: Stable
- version: latest
- data_disks:
- - lun: 0
- disk_size_gb: 64
- storage_container_name: datadisk1
- storage_blob_name: datadisk1.vhd
- - lun: 1
- disk_size_gb: 128
- storage_container_name: datadisk2
- storage_blob_name: datadisk2.vhd
-
-- name: Create a VM with a custom image
- azure_rm_virtualmachine:
- resource_group: myResourceGroup
- name: testvm001
- vm_size: Standard_DS1_v2
- admin_username: adminUser
- admin_password: password01
- image: customimage001
-
-- name: Create a VM with a custom image from a particular resource group
- azure_rm_virtualmachine:
- resource_group: myResourceGroup
- name: testvm001
- vm_size: Standard_DS1_v2
- admin_username: adminUser
- admin_password: password01
- image:
- name: customimage001
- resource_group: myResourceGroup
-
-- name: Create a VM with an image id
- azure_rm_virtualmachine:
- resource_group: myResourceGroup
- name: testvm001
- vm_size: Standard_DS1_v2
- admin_username: adminUser
- admin_password: password01
- image:
- id: '{{image_id}}'
-
-- name: Create VM with specified OS disk size
- azure_rm_virtualmachine:
- resource_group: myResourceGroup
- name: big-os-disk
- admin_username: chouseknecht
- admin_password: <your password here>
- os_disk_size_gb: 512
- image:
- offer: CentOS
- publisher: OpenLogic
- sku: '7.1'
- version: latest
-
-- name: Create VM with OS and Plan, accepting the terms
- azure_rm_virtualmachine:
- resource_group: myResourceGroup
- name: f5-nva
- admin_username: chouseknecht
- admin_password: <your password here>
- image:
- publisher: f5-networks
- offer: f5-big-ip-best
- sku: f5-bigip-virtual-edition-200m-best-hourly
- version: latest
- plan:
- name: f5-bigip-virtual-edition-200m-best-hourly
- product: f5-big-ip-best
- publisher: f5-networks
-
-- name: Power Off
- azure_rm_virtualmachine:
- resource_group: myResourceGroup
- name: testvm002
- started: no
-
-- name: Deallocate
- azure_rm_virtualmachine:
- resource_group: myResourceGroup
- name: testvm002
- allocated: no
-
-- name: Power On
- azure_rm_virtualmachine:
- resource_group: myResourceGroup
- name: testvm002
-
-- name: Restart
- azure_rm_virtualmachine:
- resource_group: myResourceGroup
- name: testvm002
- restarted: yes
-
-- name: Create a VM with an Availability Zone
- azure_rm_virtualmachine:
- resource_group: myResourceGroup
- name: testvm001
- vm_size: Standard_DS1_v2
- admin_username: adminUser
- admin_password: password01
- image: customimage001
- zones: [1]
-
-- name: Remove a VM and all resources that were autocreated
- azure_rm_virtualmachine:
- resource_group: myResourceGroup
- name: testvm002
- remove_on_absent: all_autocreated
- state: absent
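-
-# A minimal sketch combining the winrm and boot_diagnostics options documented
-# above; the image reference and password are illustrative placeholders.
-- name: Create a Windows VM with a WinRM listener and boot diagnostics
- azure_rm_virtualmachine:
- resource_group: myResourceGroup
- name: testvm003
- vm_size: Standard_DS1_v2
- admin_username: adminUser
- admin_password: <your password here>
- os_type: Windows
- winrm:
- - protocol: http
- boot_diagnostics:
- enabled: yes
- image:
- offer: WindowsServer
- publisher: MicrosoftWindowsServer
- sku: 2019-Datacenter
- version: latest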
-'''
-
-RETURN = '''
-powerstate:
- description:
- - Indicates whether the power state is C(running), C(stopped), C(deallocated), or C(generalized).
- returned: always
- type: str
- sample: running
-deleted_vhd_uris:
- description:
- - List of deleted Virtual Hard Disk URIs.
- returned: 'on delete'
- type: list
- sample: ["https://testvm104519.blob.core.windows.net/vhds/testvm10.vhd"]
-deleted_network_interfaces:
- description:
- - List of deleted NICs.
- returned: 'on delete'
- type: list
- sample: ["testvm1001"]
-deleted_public_ips:
- description:
- - List of deleted public IP address names.
- returned: 'on delete'
- type: list
- sample: ["testvm1001"]
-azure_vm:
- description:
- - Facts about the current state of the object. Note that facts are not part of the registered output but available directly.
- returned: always
- type: dict
- sample: {
- "properties": {
- "availabilitySet": {
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Compute/availabilitySets/MYAVAILABILITYSET"
- },
- "hardwareProfile": {
- "vmSize": "Standard_D1"
- },
- "instanceView": {
- "disks": [
- {
- "name": "testvm10.vhd",
- "statuses": [
- {
- "code": "ProvisioningState/succeeded",
- "displayStatus": "Provisioning succeeded",
- "level": "Info",
- "time": "2016-03-30T07:11:16.187272Z"
- }
- ]
- }
- ],
- "statuses": [
- {
- "code": "ProvisioningState/succeeded",
- "displayStatus": "Provisioning succeeded",
- "level": "Info",
- "time": "2016-03-30T20:33:38.946916Z"
- },
- {
- "code": "PowerState/running",
- "displayStatus": "VM running",
- "level": "Info"
- }
- ],
- "vmAgent": {
- "extensionHandlers": [],
- "statuses": [
- {
- "code": "ProvisioningState/succeeded",
- "displayStatus": "Ready",
- "level": "Info",
- "message": "GuestAgent is running and accepting new configurations.",
- "time": "2016-03-30T20:31:16.000Z"
- }
- ],
- "vmAgentVersion": "WALinuxAgent-2.0.16"
- }
- },
- "networkProfile": {
- "networkInterfaces": [
- {
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01",
- "name": "testvm10_NIC01",
- "properties": {
- "dnsSettings": {
- "appliedDnsServers": [],
- "dnsServers": []
- },
- "enableIPForwarding": false,
- "ipConfigurations": [
- {
- "etag": 'W/"041c8c2a-d5dd-4cd7-8465-9125cfbe2cf8"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01/ipConfigurations/default",
- "name": "default",
- "properties": {
- "privateIPAddress": "10.10.0.5",
- "privateIPAllocationMethod": "Dynamic",
- "provisioningState": "Succeeded",
- "publicIPAddress": {
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/publicIPAddresses/testvm10_PIP01",
- "name": "testvm10_PIP01",
- "properties": {
- "idleTimeoutInMinutes": 4,
- "ipAddress": "13.92.246.197",
- "ipConfiguration": {
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkInterfaces/testvm10_NIC01/ipConfigurations/default"
- },
- "provisioningState": "Succeeded",
- "publicIPAllocationMethod": "Static",
- "resourceGuid": "3447d987-ca0d-4eca-818b-5dddc0625b42"
- }
- }
- }
- }
- ],
- "macAddress": "00-0D-3A-12-AA-14",
- "primary": true,
- "provisioningState": "Succeeded",
- "resourceGuid": "10979e12-ccf9-42ee-9f6d-ff2cc63b3844",
- "virtualMachine": {
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Compute/virtualMachines/testvm10"
- }
- }
- }
- ]
- },
- "osProfile": {
- "adminUsername": "chouseknecht",
- "computerName": "test10",
- "linuxConfiguration": {
- "disablePasswordAuthentication": false
- },
- "secrets": []
- },
- "provisioningState": "Succeeded",
- "storageProfile": {
- "dataDisks": [
- {
- "caching": "ReadWrite",
- "createOption": "empty",
- "diskSizeGB": 64,
- "lun": 0,
- "name": "datadisk1.vhd",
- "vhd": {
- "uri": "https://testvm10sa1.blob.core.windows.net/datadisk/datadisk1.vhd"
- }
- }
- ],
- "imageReference": {
- "offer": "CentOS",
- "publisher": "OpenLogic",
- "sku": "7.1",
- "version": "7.1.20160308"
- },
- "osDisk": {
- "caching": "ReadOnly",
- "createOption": "fromImage",
- "name": "testvm10.vhd",
- "osType": "Linux",
- "vhd": {
- "uri": "https://testvm10sa1.blob.core.windows.net/vhds/testvm10.vhd"
- }
- }
- }
- },
- "type": "Microsoft.Compute/virtualMachines"
- }
-''' # NOQA
-
-import base64
-import random
-import re
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.tools import parse_resource_id
- from msrest.polling import LROPoller
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.basic import to_native, to_bytes
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict, normalize_location_name, format_resource_id
-
-
-AZURE_OBJECT_CLASS = 'VirtualMachine'
-
-AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
-
-
-def extract_names_from_blob_uri(blob_uri, storage_suffix):
- # HACK: ditch this once python SDK supports get by URI
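- # e.g. 'https://testvm10sa1.blob.core.windows.net/vhds/testvm10.vhd' ->
- # accountname='testvm10sa1', containername='vhds', blobname='testvm10.vhd'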
- m = re.match(r'^https?://(?P<accountname>[^.]+)\.blob\.{0}/'
- r'(?P<containername>[^/]+)/(?P<blobname>.+)$'.format(storage_suffix), blob_uri)
- if not m:
- raise Exception("unable to parse blob uri '%s'" % blob_uri)
- extracted_names = m.groupdict()
- return extracted_names
-
-
-class AzureRMVirtualMachine(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- custom_data=dict(type='str'),
- state=dict(choices=['present', 'absent'], default='present', type='str'),
- location=dict(type='str'),
- short_hostname=dict(type='str'),
- vm_size=dict(type='str'),
- admin_username=dict(type='str'),
- admin_password=dict(type='str', no_log=True),
- ssh_password_enabled=dict(type='bool', default=True),
- ssh_public_keys=dict(type='list'),
- image=dict(type='raw'),
- availability_set=dict(type='str'),
- storage_account_name=dict(type='str', aliases=['storage_account']),
- storage_container_name=dict(type='str', aliases=['storage_container'], default='vhds'),
- storage_blob_name=dict(type='str', aliases=['storage_blob']),
- os_disk_caching=dict(type='str', aliases=['disk_caching'], choices=['ReadOnly', 'ReadWrite']),
- os_disk_size_gb=dict(type='int'),
- managed_disk_type=dict(type='str', choices=['Standard_LRS', 'StandardSSD_LRS', 'Premium_LRS']),
- os_disk_name=dict(type='str'),
- os_type=dict(type='str', choices=['Linux', 'Windows'], default='Linux'),
- public_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static', 'Disabled'], default='Static',
- aliases=['public_ip_allocation']),
- open_ports=dict(type='list'),
- network_interface_names=dict(type='list', aliases=['network_interfaces'], elements='raw'),
- remove_on_absent=dict(type='list', default=['all']),
- virtual_network_resource_group=dict(type='str'),
- virtual_network_name=dict(type='str', aliases=['virtual_network']),
- subnet_name=dict(type='str', aliases=['subnet']),
- allocated=dict(type='bool', default=True),
- restarted=dict(type='bool', default=False),
- started=dict(type='bool', default=True),
- generalized=dict(type='bool', default=False),
- data_disks=dict(type='list'),
- plan=dict(type='dict'),
- zones=dict(type='list'),
- accept_terms=dict(type='bool', default=False),
- license_type=dict(type='str', choices=['Windows_Server', 'Windows_Client']),
- vm_identity=dict(type='str', choices=['SystemAssigned']),
- winrm=dict(type='list'),
- boot_diagnostics=dict(type='dict'),
- )
-
- self.resource_group = None
- self.name = None
- self.custom_data = None
- self.state = None
- self.location = None
- self.short_hostname = None
- self.vm_size = None
- self.admin_username = None
- self.admin_password = None
- self.ssh_password_enabled = None
- self.ssh_public_keys = None
- self.image = None
- self.availability_set = None
- self.storage_account_name = None
- self.storage_container_name = None
- self.storage_blob_name = None
- self.os_type = None
- self.os_disk_caching = None
- self.os_disk_size_gb = None
- self.managed_disk_type = None
- self.os_disk_name = None
- self.network_interface_names = None
- self.remove_on_absent = set()
- self.tags = None
- self.force = None
- self.public_ip_allocation_method = None
- self.open_ports = None
- self.virtual_network_resource_group = None
- self.virtual_network_name = None
- self.subnet_name = None
- self.allocated = None
- self.restarted = None
- self.started = None
- self.generalized = None
- self.differences = None
- self.data_disks = None
- self.plan = None
- self.accept_terms = None
- self.zones = None
- self.license_type = None
- self.vm_identity = None
- self.boot_diagnostics = None
-
- self.results = dict(
- changed=False,
- actions=[],
- powerstate_change=None,
- ansible_facts=dict(azure_vm=None)
- )
-
- super(AzureRMVirtualMachine, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True)
-
- @property
- def boot_diagnostics_present(self):
- return self.boot_diagnostics is not None and 'enabled' in self.boot_diagnostics
-
- def get_boot_diagnostics_storage_account(self, limited=False, vm_dict=None):
- """
- Get the boot diagnostics storage account.
-
- Arguments:
- - limited - if true, limit the logic to the boot_diagnostics storage account.
- This is used when the VM is initially created with a boot_diagnostics stanza
- that has it disabled, so we only create a storage account if the user
- specifies a storage account name inside the boot_diagnostics schema.
- - vm_dict - if invoked on an update, this is the current state of the VM, including
- tags such as the default storage account tag '_own_sa_'.
-
- Normal behavior:
- - try the self.boot_diagnostics.storage_account field
- - if not there, try the self.storage_account_name field
- - if not there, use the default storage account
-
- If limited is True:
- - try the self.boot_diagnostics.storage_account field
- - if not there, None
- """
- bsa = None
- if 'storage_account' in self.boot_diagnostics:
- bsa = self.get_storage_account(self.boot_diagnostics['storage_account'])
- elif limited:
- return None
- elif self.storage_account_name:
- bsa = self.get_storage_account(self.storage_account_name)
- else:
- bsa = self.create_default_storage_account(vm_dict=vm_dict)
- self.log("boot diagnostics storage account:")
- self.log(self.serialize_obj(bsa, 'StorageAccount'), pretty_print=True)
- return bsa
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- # make sure options are lower case
- self.remove_on_absent = set([resource.lower() for resource in self.remove_on_absent])
-
- # convert elements to ints
- self.zones = [int(i) for i in self.zones] if self.zones else None
-
- changed = False
- powerstate_change = None
- results = dict()
- vm = None
- network_interfaces = []
- requested_storage_uri = None
- requested_vhd_uri = None
- data_disk_requested_vhd_uri = None
- disable_ssh_password = None
- vm_dict = None
- image_reference = None
- custom_image = False
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- # Set default location
- self.location = resource_group.location
-
- self.location = normalize_location_name(self.location)
-
- if self.state == 'present':
- # Verify parameters and resolve any defaults
-
- if self.vm_size and not self.vm_size_is_valid():
- self.fail("Parameter error: vm_size {0} is not valid for your subscription and location.".format(
- self.vm_size
- ))
-
- if self.network_interface_names:
- for nic_name in self.network_interface_names:
- nic = self.parse_network_interface(nic_name)
- network_interfaces.append(nic)
-
- if self.ssh_public_keys:
- msg = "Parameter error: expecting ssh_public_keys to be a list of type dict where " \
- "each dict contains keys: path, key_data."
- for key in self.ssh_public_keys:
- if not isinstance(key, dict):
- self.fail(msg)
- if not key.get('path') or not key.get('key_data'):
- self.fail(msg)
-
- if self.image and isinstance(self.image, dict):
- if all(key in self.image for key in ('publisher', 'offer', 'sku', 'version')):
- marketplace_image = self.get_marketplace_image_version()
- if self.image['version'] == 'latest':
- self.image['version'] = marketplace_image.name
- self.log("Using image version {0}".format(self.image['version']))
-
- image_reference = self.compute_models.ImageReference(
- publisher=self.image['publisher'],
- offer=self.image['offer'],
- sku=self.image['sku'],
- version=self.image['version']
- )
- elif self.image.get('name'):
- custom_image = True
- image_reference = self.get_custom_image_reference(
- self.image.get('name'),
- self.image.get('resource_group'))
- elif self.image.get('id'):
- try:
- image_reference = self.compute_models.ImageReference(id=self.image['id'])
- except Exception as exc:
- self.fail("id Error: Cannot get image from the reference id - {0}".format(self.image['id']))
- else:
- self.fail("parameter error: expecting image to contain [publisher, offer, sku, version], [name, resource_group] or [id]")
- elif self.image and isinstance(self.image, str):
- custom_image = True
- image_reference = self.get_custom_image_reference(self.image)
- elif self.image:
- self.fail("parameter error: expecting image to be a string or dict not {0}".format(type(self.image).__name__))
-
- if self.plan:
- if not self.plan.get('name') or not self.plan.get('product') or not self.plan.get('publisher'):
- self.fail("parameter error: plan must include name, product, and publisher")
-
- if not self.storage_blob_name and not self.managed_disk_type:
- self.storage_blob_name = self.name + '.vhd'
- elif self.managed_disk_type:
- self.storage_blob_name = self.name
-
- if self.storage_account_name and not self.managed_disk_type:
- properties = self.get_storage_account(self.storage_account_name)
-
- requested_storage_uri = properties.primary_endpoints.blob
- requested_vhd_uri = '{0}{1}/{2}'.format(requested_storage_uri,
- self.storage_container_name,
- self.storage_blob_name)
-
- disable_ssh_password = not self.ssh_password_enabled
-
- try:
- self.log("Fetching virtual machine {0}".format(self.name))
- vm = self.compute_client.virtual_machines.get(self.resource_group, self.name, expand='instanceview')
- self.check_provisioning_state(vm, self.state)
- vm_dict = self.serialize_vm(vm)
-
- if self.state == 'present':
- differences = []
- current_nics = []
- results = vm_dict
-
- # Try to determine if the VM needs to be updated
- if self.network_interface_names:
- for nic in vm_dict['properties']['networkProfile']['networkInterfaces']:
- current_nics.append(nic['id'])
-
- if set(current_nics) != set(network_interfaces):
- self.log('CHANGED: virtual machine {0} - network interfaces are different.'.format(self.name))
- differences.append('Network Interfaces')
- updated_nics = [dict(id=id, primary=(i == 0))
- for i, id in enumerate(network_interfaces)]
- vm_dict['properties']['networkProfile']['networkInterfaces'] = updated_nics
- changed = True
-
- if self.os_disk_caching and \
- self.os_disk_caching != vm_dict['properties']['storageProfile']['osDisk']['caching']:
- self.log('CHANGED: virtual machine {0} - OS disk caching'.format(self.name))
- differences.append('OS Disk caching')
- changed = True
- vm_dict['properties']['storageProfile']['osDisk']['caching'] = self.os_disk_caching
-
- if self.os_disk_name and \
- self.os_disk_name != vm_dict['properties']['storageProfile']['osDisk']['name']:
- self.log('CHANGED: virtual machine {0} - OS disk name'.format(self.name))
- differences.append('OS Disk name')
- changed = True
- vm_dict['properties']['storageProfile']['osDisk']['name'] = self.os_disk_name
-
- if self.os_disk_size_gb and \
- self.os_disk_size_gb != vm_dict['properties']['storageProfile']['osDisk'].get('diskSizeGB'):
- self.log('CHANGED: virtual machine {0} - OS disk size '.format(self.name))
- differences.append('OS Disk size')
- changed = True
- vm_dict['properties']['storageProfile']['osDisk']['diskSizeGB'] = self.os_disk_size_gb
-
- if self.vm_size and \
- self.vm_size != vm_dict['properties']['hardwareProfile']['vmSize']:
- self.log('CHANGED: virtual machine {0} - size '.format(self.name))
- differences.append('VM size')
- changed = True
- vm_dict['properties']['hardwareProfile']['vmSize'] = self.vm_size
-
- update_tags, vm_dict['tags'] = self.update_tags(vm_dict.get('tags', dict()))
- if update_tags:
- differences.append('Tags')
- changed = True
-
- if self.short_hostname and self.short_hostname != vm_dict['properties']['osProfile']['computerName']:
- self.log('CHANGED: virtual machine {0} - short hostname'.format(self.name))
- differences.append('Short Hostname')
- changed = True
- vm_dict['properties']['osProfile']['computerName'] = self.short_hostname
-
- if self.started and vm_dict['powerstate'] not in ['starting', 'running'] and self.allocated:
- self.log("CHANGED: virtual machine {0} not running and requested state 'running'".format(self.name))
- changed = True
- powerstate_change = 'poweron'
- elif self.state == 'present' and vm_dict['powerstate'] == 'running' and self.restarted:
- self.log("CHANGED: virtual machine {0} {1} and requested state 'restarted'"
- .format(self.name, vm_dict['powerstate']))
- changed = True
- powerstate_change = 'restarted'
- elif self.state == 'present' and not self.allocated and vm_dict['powerstate'] not in ['deallocated', 'deallocating']:
- self.log("CHANGED: virtual machine {0} {1} and requested state 'deallocated'"
- .format(self.name, vm_dict['powerstate']))
- changed = True
- powerstate_change = 'deallocated'
- elif not self.started and vm_dict['powerstate'] == 'running':
- self.log("CHANGED: virtual machine {0} running and requested state 'stopped'".format(self.name))
- changed = True
- powerstate_change = 'poweroff'
- elif self.generalized and vm_dict['powerstate'] != 'generalized':
- self.log("CHANGED: virtual machine {0} requested to be 'generalized'".format(self.name))
- changed = True
- powerstate_change = 'generalized'
-
- vm_dict['zones'] = [int(i) for i in vm_dict['zones']] if 'zones' in vm_dict and vm_dict['zones'] else None
- if self.zones and self.zones != vm_dict['zones']:
- self.log("CHANGED: virtual machine {0} zones".format(self.name))
- differences.append('Zones')
- changed = True
-
- if self.license_type is not None and vm_dict['properties'].get('licenseType') != self.license_type:
- differences.append('License Type')
- changed = True
-
- # Defaults for boot diagnostics
- if 'diagnosticsProfile' not in vm_dict['properties']:
- vm_dict['properties']['diagnosticsProfile'] = {}
- if 'bootDiagnostics' not in vm_dict['properties']['diagnosticsProfile']:
- vm_dict['properties']['diagnosticsProfile']['bootDiagnostics'] = {
- 'enabled': False,
- 'storageUri': None
- }
- if self.boot_diagnostics_present:
- current_boot_diagnostics = vm_dict['properties']['diagnosticsProfile']['bootDiagnostics']
- boot_diagnostics_changed = False
-
- if self.boot_diagnostics['enabled'] != current_boot_diagnostics['enabled']:
- current_boot_diagnostics['enabled'] = self.boot_diagnostics['enabled']
- boot_diagnostics_changed = True
-
- boot_diagnostics_storage_account = self.get_boot_diagnostics_storage_account(
- limited=not self.boot_diagnostics['enabled'], vm_dict=vm_dict)
- boot_diagnostics_blob = boot_diagnostics_storage_account.primary_endpoints.blob if boot_diagnostics_storage_account else None
- if current_boot_diagnostics['storageUri'] != boot_diagnostics_blob:
- current_boot_diagnostics['storageUri'] = boot_diagnostics_blob
- boot_diagnostics_changed = True
-
- if boot_diagnostics_changed:
- differences.append('Boot Diagnostics')
- changed = True
-
- # Adding boot diagnostics can create a default storage account after initial creation
- # this means we might also need to update the _own_sa_ tag
- own_sa = (self.tags or {}).get('_own_sa_', None)
- cur_sa = vm_dict.get('tags', {}).get('_own_sa_', None)
- if own_sa and own_sa != cur_sa:
- if 'Tags' not in differences:
- differences.append('Tags')
- if 'tags' not in vm_dict:
- vm_dict['tags'] = {}
- vm_dict['tags']['_own_sa_'] = own_sa
- changed = True
-
- self.differences = differences
-
- elif self.state == 'absent':
- self.log("CHANGED: virtual machine {0} exists and requested state is 'absent'".format(self.name))
- results = dict()
- changed = True
-
- except CloudError:
- self.log('Virtual machine {0} does not exist'.format(self.name))
- if self.state == 'present':
- self.log("CHANGED: virtual machine {0} does not exist but state is 'present'.".format(self.name))
- changed = True
-
- self.results['changed'] = changed
- self.results['ansible_facts']['azure_vm'] = results
- self.results['powerstate_change'] = powerstate_change
-
- if self.check_mode:
- return self.results
-
- if changed:
- if self.state == 'present':
- if not vm:
- # Create the VM
- self.log("Create virtual machine {0}".format(self.name))
- self.results['actions'].append('Created VM {0}'.format(self.name))
-
- if self.os_type == 'Linux':
- if disable_ssh_password and not self.ssh_public_keys:
- self.fail("Parameter error: ssh_public_keys required when disabling SSH password.")
-
- if not image_reference:
- self.fail("Parameter error: an image is required when creating a virtual machine.")
-
- availability_set_resource = None
- if self.availability_set:
- parsed_availability_set = parse_resource_id(self.availability_set)
- availability_set = self.get_availability_set(parsed_availability_set.get('resource_group', self.resource_group),
- parsed_availability_set.get('name'))
- availability_set_resource = self.compute_models.SubResource(id=availability_set.id)
-
- if self.zones:
- self.fail("Parameter error: you can't use Availability Set and Availability Zones at the same time")
-
- # Get defaults
- if not self.network_interface_names:
- default_nic = self.create_default_nic()
- self.log("network interface:")
- self.log(self.serialize_obj(default_nic, 'NetworkInterface'), pretty_print=True)
- network_interfaces = [default_nic.id]
-
- # os disk
- if not self.storage_account_name and not self.managed_disk_type:
- storage_account = self.create_default_storage_account()
- self.log("os disk storage account:")
- self.log(self.serialize_obj(storage_account, 'StorageAccount'), pretty_print=True)
- requested_storage_uri = 'https://{0}.blob.{1}/'.format(
- storage_account.name,
- self._cloud_environment.suffixes.storage_endpoint)
- requested_vhd_uri = '{0}{1}/{2}'.format(
- requested_storage_uri,
- self.storage_container_name,
- self.storage_blob_name)
- # disk caching
- if not self.os_disk_caching:
- self.os_disk_caching = 'ReadOnly'
-
- if not self.short_hostname:
- self.short_hostname = self.name
-
- nics = [self.compute_models.NetworkInterfaceReference(id=id, primary=(i == 0))
- for i, id in enumerate(network_interfaces)]
-
- # os disk
- if self.managed_disk_type:
- vhd = None
- managed_disk = self.compute_models.ManagedDiskParameters(storage_account_type=self.managed_disk_type)
- elif custom_image:
- vhd = None
- managed_disk = None
- else:
- vhd = self.compute_models.VirtualHardDisk(uri=requested_vhd_uri)
- managed_disk = None
-
- plan = None
- if self.plan:
- plan = self.compute_models.Plan(name=self.plan.get('name'), product=self.plan.get('product'),
- publisher=self.plan.get('publisher'),
- promotion_code=self.plan.get('promotion_code'))
-
- # do this before creating vm_resource as it can modify tags
- if self.boot_diagnostics_present and self.boot_diagnostics['enabled']:
- boot_diag_storage_account = self.get_boot_diagnostics_storage_account()
-
- os_profile = None
- if self.admin_username:
- os_profile = self.compute_models.OSProfile(
- admin_username=self.admin_username,
- computer_name=self.short_hostname,
- )
- vm_resource = self.compute_models.VirtualMachine(
- location=self.location,
- tags=self.tags,
- os_profile=os_profile,
- hardware_profile=self.compute_models.HardwareProfile(
- vm_size=self.vm_size
- ),
- storage_profile=self.compute_models.StorageProfile(
- os_disk=self.compute_models.OSDisk(
- name=self.os_disk_name if self.os_disk_name else self.storage_blob_name,
- vhd=vhd,
- managed_disk=managed_disk,
- create_option=self.compute_models.DiskCreateOptionTypes.from_image,
- caching=self.os_disk_caching,
- disk_size_gb=self.os_disk_size_gb
- ),
- image_reference=image_reference,
- ),
- network_profile=self.compute_models.NetworkProfile(
- network_interfaces=nics
- ),
- availability_set=availability_set_resource,
- plan=plan,
- zones=self.zones,
- )
-
- if self.license_type is not None:
- vm_resource.license_type = self.license_type
-
- if self.vm_identity:
- vm_resource.identity = self.compute_models.VirtualMachineIdentity(type=self.vm_identity)
-
- if self.winrm:
- winrm_listeners = list()
- for winrm_listener in self.winrm:
- winrm_listeners.append(self.compute_models.WinRMListener(
- protocol=winrm_listener.get('protocol'),
- certificate_url=winrm_listener.get('certificate_url')
- ))
- if winrm_listener.get('source_vault'):
- if not vm_resource.os_profile.secrets:
- vm_resource.os_profile.secrets = list()
-
- vm_resource.os_profile.secrets.append(self.compute_models.VaultSecretGroup(
- source_vault=self.compute_models.SubResource(
- id=winrm_listener.get('source_vault')
- ),
- vault_certificates=[
- self.compute_models.VaultCertificate(
- certificate_url=winrm_listener.get('certificate_url'),
- certificate_store=winrm_listener.get('certificate_store')
- ),
- ]
- ))
-
- winrm = self.compute_models.WinRMConfiguration(
- listeners=winrm_listeners
- )
-
- if not vm_resource.os_profile.windows_configuration:
- vm_resource.os_profile.windows_configuration = self.compute_models.WindowsConfiguration(
- win_rm=winrm
- )
- elif not vm_resource.os_profile.windows_configuration.win_rm:
- vm_resource.os_profile.windows_configuration.win_rm = winrm
-
- if self.boot_diagnostics_present:
- vm_resource.diagnostics_profile = self.compute_models.DiagnosticsProfile(
- boot_diagnostics=self.compute_models.BootDiagnostics(
- enabled=self.boot_diagnostics['enabled'],
- storage_uri=boot_diag_storage_account.primary_endpoints.blob))
-
- if self.admin_password:
- vm_resource.os_profile.admin_password = self.admin_password
-
- if self.custom_data:
- # Azure SDK (erroneously?) wants native string type for this
- vm_resource.os_profile.custom_data = to_native(base64.b64encode(to_bytes(self.custom_data)))
-
- if self.os_type == 'Linux' and os_profile:
- vm_resource.os_profile.linux_configuration = self.compute_models.LinuxConfiguration(
- disable_password_authentication=disable_ssh_password
- )
- if self.ssh_public_keys:
- ssh_config = self.compute_models.SshConfiguration()
- ssh_config.public_keys = \
- [self.compute_models.SshPublicKey(path=key['path'], key_data=key['key_data']) for key in self.ssh_public_keys]
- vm_resource.os_profile.linux_configuration.ssh = ssh_config
-
- # data disk
- if self.data_disks:
- data_disks = []
- count = 0
-
- for data_disk in self.data_disks:
- if not data_disk.get('managed_disk_type'):
- if not data_disk.get('storage_blob_name'):
- data_disk['storage_blob_name'] = self.name + '-data-' + str(count) + '.vhd'
- count += 1
-
- if data_disk.get('storage_account_name'):
- data_disk_storage_account = self.get_storage_account(data_disk['storage_account_name'])
- else:
- data_disk_storage_account = self.create_default_storage_account()
- self.log("data disk storage account:")
- self.log(self.serialize_obj(data_disk_storage_account, 'StorageAccount'), pretty_print=True)
-
- if not data_disk.get('storage_container_name'):
- data_disk['storage_container_name'] = 'vhds'
-
- data_disk_requested_vhd_uri = 'https://{0}.blob.{1}/{2}/{3}'.format(
- data_disk_storage_account.name,
- self._cloud_environment.suffixes.storage_endpoint,
- data_disk['storage_container_name'],
- data_disk['storage_blob_name']
- )
-
- if not data_disk.get('managed_disk_type'):
- data_disk_managed_disk = None
- disk_name = data_disk['storage_blob_name']
- data_disk_vhd = self.compute_models.VirtualHardDisk(uri=data_disk_requested_vhd_uri)
- else:
- data_disk_vhd = None
- data_disk_managed_disk = self.compute_models.ManagedDiskParameters(storage_account_type=data_disk['managed_disk_type'])
- disk_name = self.name + "-datadisk-" + str(count)
- count += 1
-
- data_disk['caching'] = data_disk.get(
- 'caching', 'ReadOnly'
- )
-
- data_disks.append(self.compute_models.DataDisk(
- lun=data_disk['lun'],
- name=disk_name,
- vhd=data_disk_vhd,
- caching=data_disk['caching'],
- create_option=self.compute_models.DiskCreateOptionTypes.empty,
- disk_size_gb=data_disk['disk_size_gb'],
- managed_disk=data_disk_managed_disk,
- ))
-
- vm_resource.storage_profile.data_disks = data_disks
-
- # Before creating VM accept terms of plan if `accept_terms` is True
- if self.accept_terms is True:
- if not self.plan or not all([self.plan.get('name'), self.plan.get('product'), self.plan.get('publisher')]):
- self.fail("parameter error: plan must be specified and include name, product, and publisher")
- try:
- plan_name = self.plan.get('name')
- plan_product = self.plan.get('product')
- plan_publisher = self.plan.get('publisher')
- term = self.marketplace_client.marketplace_agreements.get(
- publisher_id=plan_publisher, offer_id=plan_product, plan_id=plan_name)
- term.accepted = True
- self.marketplace_client.marketplace_agreements.create(
- publisher_id=plan_publisher, offer_id=plan_product, plan_id=plan_name, parameters=term)
- except Exception as exc:
- self.fail(("Error accepting terms for virtual machine {0} with plan {1}. " +
- "Only service admin/account admin users can purchase images " +
- "from the marketplace. - {2}").format(self.name, self.plan, str(exc)))
-
- self.log("Create virtual machine with parameters:")
- self.create_or_update_vm(vm_resource, 'all_autocreated' in self.remove_on_absent)
-
- elif self.differences and len(self.differences) > 0:
- # Update the VM based on detected config differences
-
- self.log("Update virtual machine {0}".format(self.name))
- self.results['actions'].append('Updated VM {0}'.format(self.name))
- nics = [self.compute_models.NetworkInterfaceReference(id=interface['id'], primary=(i == 0))
- for i, interface in enumerate(vm_dict['properties']['networkProfile']['networkInterfaces'])]
-
- # os disk
- if not vm_dict['properties']['storageProfile']['osDisk'].get('managedDisk'):
- managed_disk = None
- vhd = self.compute_models.VirtualHardDisk(uri=vm_dict['properties']['storageProfile']['osDisk'].get('vhd', {}).get('uri'))
- else:
- vhd = None
- managed_disk = self.compute_models.ManagedDiskParameters(
- storage_account_type=vm_dict['properties']['storageProfile']['osDisk']['managedDisk'].get('storageAccountType')
- )
-
- availability_set_resource = None
- try:
- availability_set_resource = self.compute_models.SubResource(id=vm_dict['properties']['availabilitySet'].get('id'))
- except Exception:
- # pass if the availability set is not set
- pass
-
- if 'imageReference' in vm_dict['properties']['storageProfile'].keys():
- if 'id' in vm_dict['properties']['storageProfile']['imageReference'].keys():
- image_reference = self.compute_models.ImageReference(
- id=vm_dict['properties']['storageProfile']['imageReference']['id']
- )
- else:
- image_reference = self.compute_models.ImageReference(
- publisher=vm_dict['properties']['storageProfile']['imageReference'].get('publisher'),
- offer=vm_dict['properties']['storageProfile']['imageReference'].get('offer'),
- sku=vm_dict['properties']['storageProfile']['imageReference'].get('sku'),
- version=vm_dict['properties']['storageProfile']['imageReference'].get('version')
- )
- else:
- image_reference = None
-
- # You can't change a vm zone
- if vm_dict['zones'] != self.zones:
- self.fail("You can't change the Availability Zone of a virtual machine (have: {0}, want: {1})".format(vm_dict['zones'], self.zones))
-
- if 'osProfile' in vm_dict['properties']:
- os_profile = self.compute_models.OSProfile(
- admin_username=vm_dict['properties'].get('osProfile', {}).get('adminUsername'),
- computer_name=vm_dict['properties'].get('osProfile', {}).get('computerName')
- )
- else:
- os_profile = None
-
- vm_resource = self.compute_models.VirtualMachine(
- location=vm_dict['location'],
- os_profile=os_profile,
- hardware_profile=self.compute_models.HardwareProfile(
- vm_size=vm_dict['properties']['hardwareProfile'].get('vmSize')
- ),
- storage_profile=self.compute_models.StorageProfile(
- os_disk=self.compute_models.OSDisk(
- name=vm_dict['properties']['storageProfile']['osDisk'].get('name'),
- vhd=vhd,
- managed_disk=managed_disk,
- create_option=vm_dict['properties']['storageProfile']['osDisk'].get('createOption'),
- os_type=vm_dict['properties']['storageProfile']['osDisk'].get('osType'),
- caching=vm_dict['properties']['storageProfile']['osDisk'].get('caching'),
- disk_size_gb=vm_dict['properties']['storageProfile']['osDisk'].get('diskSizeGB')
- ),
- image_reference=image_reference
- ),
- availability_set=availability_set_resource,
- network_profile=self.compute_models.NetworkProfile(
- network_interfaces=nics
- )
- )
-
- if self.license_type is not None:
- vm_resource.license_type = self.license_type
-
- if self.boot_diagnostics is not None:
- vm_resource.diagnostics_profile = self.compute_models.DiagnosticsProfile(
- boot_diagnostics=self.compute_models.BootDiagnostics(
- enabled=vm_dict['properties']['diagnosticsProfile']['bootDiagnostics']['enabled'],
- storage_uri=vm_dict['properties']['diagnosticsProfile']['bootDiagnostics']['storageUri']))
-
- if vm_dict.get('tags'):
- vm_resource.tags = vm_dict['tags']
-
- # Add custom_data, if provided
- if vm_dict['properties'].get('osProfile', {}).get('customData'):
- custom_data = vm_dict['properties']['osProfile']['customData']
- # Azure SDK (erroneously?) wants native string type for this
- vm_resource.os_profile.custom_data = to_native(base64.b64encode(to_bytes(custom_data)))
-
- # Add admin password, if one provided
- if vm_dict['properties'].get('osProfile', {}).get('adminPassword'):
- vm_resource.os_profile.admin_password = vm_dict['properties']['osProfile']['adminPassword']
-
- # Add linux configuration, if applicable
- linux_config = vm_dict['properties'].get('osProfile', {}).get('linuxConfiguration')
- if linux_config:
- ssh_config = linux_config.get('ssh', None)
- vm_resource.os_profile.linux_configuration = self.compute_models.LinuxConfiguration(
- disable_password_authentication=linux_config.get('disablePasswordAuthentication', False)
- )
- if ssh_config:
- public_keys = ssh_config.get('publicKeys')
- if public_keys:
- vm_resource.os_profile.linux_configuration.ssh = self.compute_models.SshConfiguration(public_keys=[])
- for key in public_keys:
- vm_resource.os_profile.linux_configuration.ssh.public_keys.append(
- self.compute_models.SshPublicKey(path=key['path'], key_data=key['keyData'])
- )
-
- # data disk
- if vm_dict['properties']['storageProfile'].get('dataDisks'):
- data_disks = []
-
- for data_disk in vm_dict['properties']['storageProfile']['dataDisks']:
- if data_disk.get('managedDisk'):
- managed_disk_type = data_disk['managedDisk'].get('storageAccountType')
- data_disk_managed_disk = self.compute_models.ManagedDiskParameters(storage_account_type=managed_disk_type)
- data_disk_vhd = None
- else:
- data_disk_vhd = data_disk['vhd']['uri']
- data_disk_managed_disk = None
-
- data_disks.append(self.compute_models.DataDisk(
- lun=int(data_disk['lun']),
- name=data_disk.get('name'),
- vhd=data_disk_vhd,
- caching=data_disk.get('caching'),
- create_option=data_disk.get('createOption'),
- disk_size_gb=int(data_disk['diskSizeGB']),
- managed_disk=data_disk_managed_disk,
- ))
- vm_resource.storage_profile.data_disks = data_disks
-
- self.log("Update virtual machine with parameters:")
- self.create_or_update_vm(vm_resource, False)
-
- # Make sure we leave the machine in requested power state
- if (powerstate_change == 'poweron' and
- self.results['ansible_facts']['azure_vm']['powerstate'] != 'running'):
- # Attempt to power on the machine
- self.power_on_vm()
-
- elif (powerstate_change == 'poweroff' and
- self.results['ansible_facts']['azure_vm']['powerstate'] == 'running'):
- # Attempt to power off the machine
- self.power_off_vm()
-
- elif powerstate_change == 'restarted':
- self.restart_vm()
-
- elif powerstate_change == 'deallocated':
- self.deallocate_vm()
- elif powerstate_change == 'generalized':
- self.power_off_vm()
- self.generalize_vm()
-
- self.results['ansible_facts']['azure_vm'] = self.serialize_vm(self.get_vm())
-
- elif self.state == 'absent':
- # delete the VM
- self.log("Delete virtual machine {0}".format(self.name))
- self.results['ansible_facts']['azure_vm'] = None
- self.delete_vm(vm)
-
- # until we sort out how we want to do this globally
- del self.results['actions']
-
- return self.results
-
- def get_vm(self):
- '''
- Get the VM with expanded instanceView
-
- :return: VirtualMachine object
- '''
- try:
- vm = self.compute_client.virtual_machines.get(self.resource_group, self.name, expand='instanceview')
- return vm
- except Exception as exc:
- self.fail("Error getting virtual machine {0} - {1}".format(self.name, str(exc)))
-
- def serialize_vm(self, vm):
- '''
- Convert a VirtualMachine object to dict.
-
- :param vm: VirtualMachine object
- :return: dict
- '''
-
- result = self.serialize_obj(vm, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)
- result['id'] = vm.id
- result['name'] = vm.name
- result['type'] = vm.type
- result['location'] = vm.location
- result['tags'] = vm.tags
-
- result['powerstate'] = dict()
- if vm.instance_view:
- result['powerstate'] = next((s.code.replace('PowerState/', '')
- for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None)
- for s in vm.instance_view.statuses:
- if s.code.lower() == "osstate/generalized":
- result['powerstate'] = 'generalized'
-
- # Expand network interfaces to include config properties
- for interface in vm.network_profile.network_interfaces:
- int_dict = azure_id_to_dict(interface.id)
- nic = self.get_network_interface(int_dict['resourceGroups'], int_dict['networkInterfaces'])
- for interface_dict in result['properties']['networkProfile']['networkInterfaces']:
- if interface_dict['id'] == interface.id:
- nic_dict = self.serialize_obj(nic, 'NetworkInterface')
- interface_dict['name'] = int_dict['networkInterfaces']
- interface_dict['properties'] = nic_dict['properties']
- # Expand public IPs to include config properties
- for interface in result['properties']['networkProfile']['networkInterfaces']:
- for config in interface['properties']['ipConfigurations']:
- if config['properties'].get('publicIPAddress'):
- pipid_dict = azure_id_to_dict(config['properties']['publicIPAddress']['id'])
- try:
- pip = self.network_client.public_ip_addresses.get(pipid_dict['resourceGroups'],
- pipid_dict['publicIPAddresses'])
- except Exception as exc:
- self.fail("Error fetching public ip {0} - {1}".format(pipid_dict['publicIPAddresses'],
- str(exc)))
- pip_dict = self.serialize_obj(pip, 'PublicIPAddress')
- config['properties']['publicIPAddress']['name'] = pipid_dict['publicIPAddresses']
- config['properties']['publicIPAddress']['properties'] = pip_dict['properties']
-
- self.log(result, pretty_print=True)
- if self.state != 'absent' and not result['powerstate']:
- self.fail("Failed to determine PowerState of virtual machine {0}".format(self.name))
- return result
-
- def power_off_vm(self):
- self.log("Powered off virtual machine {0}".format(self.name))
- self.results['actions'].append("Powered off virtual machine {0}".format(self.name))
- try:
- poller = self.compute_client.virtual_machines.power_off(self.resource_group, self.name)
- self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error powering off virtual machine {0} - {1}".format(self.name, str(exc)))
- return True
-
- def power_on_vm(self):
- self.results['actions'].append("Powered on virtual machine {0}".format(self.name))
- self.log("Power on virtual machine {0}".format(self.name))
- try:
- poller = self.compute_client.virtual_machines.start(self.resource_group, self.name)
- self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error powering on virtual machine {0} - {1}".format(self.name, str(exc)))
- return True
-
- def restart_vm(self):
- self.results['actions'].append("Restarted virtual machine {0}".format(self.name))
- self.log("Restart virtual machine {0}".format(self.name))
- try:
- poller = self.compute_client.virtual_machines.restart(self.resource_group, self.name)
- self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error restarting virtual machine {0} - {1}".format(self.name, str(exc)))
- return True
-
- def deallocate_vm(self):
- self.results['actions'].append("Deallocated virtual machine {0}".format(self.name))
- self.log("Deallocate virtual machine {0}".format(self.name))
- try:
- poller = self.compute_client.virtual_machines.deallocate(self.resource_group, self.name)
- self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error deallocating virtual machine {0} - {1}".format(self.name, str(exc)))
- return True
-
- def generalize_vm(self):
- self.results['actions'].append("Generalize virtual machine {0}".format(self.name))
- self.log("Generalize virtual machine {0}".format(self.name))
- try:
- response = self.compute_client.virtual_machines.generalize(self.resource_group, self.name)
- if isinstance(response, LROPoller):
- self.get_poller_result(response)
- except Exception as exc:
- self.fail("Error generalizing virtual machine {0} - {1}".format(self.name, str(exc)))
- return True
-
- def remove_autocreated_resources(self, tags):
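- # The names of resources this module auto-created are stored as VM tags
- # ('_own_sa_', '_own_nic_', '_own_pip_', '_own_nsg_'); delete whichever are present.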
- if tags:
- sa_name = tags.get('_own_sa_')
- nic_name = tags.get('_own_nic_')
- pip_name = tags.get('_own_pip_')
- nsg_name = tags.get('_own_nsg_')
- if sa_name:
- self.delete_storage_account(self.resource_group, sa_name)
- if nic_name:
- self.delete_nic(self.resource_group, nic_name)
- if pip_name:
- self.delete_pip(self.resource_group, pip_name)
- if nsg_name:
- self.delete_nsg(self.resource_group, nsg_name)
-
- def delete_vm(self, vm):
- vhd_uris = []
- managed_disk_ids = []
- nic_names = []
- pip_names = []
-
- if 'all_autocreated' not in self.remove_on_absent:
- if self.remove_on_absent.intersection(set(['all', 'virtual_storage'])):
- # store the attached vhd info so we can nuke it after the VM is gone
- if vm.storage_profile.os_disk.managed_disk:
- self.log('Storing managed disk ID for deletion')
- managed_disk_ids.append(vm.storage_profile.os_disk.managed_disk.id)
- elif vm.storage_profile.os_disk.vhd:
- self.log('Storing VHD URI for deletion')
- vhd_uris.append(vm.storage_profile.os_disk.vhd.uri)
-
- data_disks = vm.storage_profile.data_disks
- for data_disk in data_disks:
- if data_disk is not None:
- if data_disk.vhd:
- vhd_uris.append(data_disk.vhd.uri)
- elif data_disk.managed_disk:
- managed_disk_ids.append(data_disk.managed_disk.id)
-
- # FUTURE enable diff mode, move these there...
- self.log("VHD URIs to delete: {0}".format(', '.join(vhd_uris)))
- self.results['deleted_vhd_uris'] = vhd_uris
- self.log("Managed disk IDs to delete: {0}".format(', '.join(managed_disk_ids)))
- self.results['deleted_managed_disk_ids'] = managed_disk_ids
-
- if self.remove_on_absent.intersection(set(['all', 'network_interfaces'])):
- # store the attached nic info so we can nuke them after the VM is gone
- self.log('Storing NIC names for deletion.')
- for interface in vm.network_profile.network_interfaces:
- id_dict = azure_id_to_dict(interface.id)
- nic_names.append(dict(name=id_dict['networkInterfaces'], resource_group=id_dict['resourceGroups']))
- self.log('NIC names to delete {0}'.format(str(nic_names)))
- self.results['deleted_network_interfaces'] = nic_names
- if self.remove_on_absent.intersection(set(['all', 'public_ips'])):
- # also store each nic's attached public IPs and delete after the NIC is gone
- for nic_dict in nic_names:
- nic = self.get_network_interface(nic_dict['resource_group'], nic_dict['name'])
- for ipc in nic.ip_configurations:
- if ipc.public_ip_address:
- pip_dict = azure_id_to_dict(ipc.public_ip_address.id)
- pip_names.append(dict(name=pip_dict['publicIPAddresses'], resource_group=pip_dict['resourceGroups']))
- self.log('Public IPs to delete are {0}'.format(str(pip_names)))
- self.results['deleted_public_ips'] = pip_names
-
- self.log("Deleting virtual machine {0}".format(self.name))
- self.results['actions'].append("Deleted virtual machine {0}".format(self.name))
- try:
- poller = self.compute_client.virtual_machines.delete(self.resource_group, self.name)
- # wait for the poller to finish
- self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error deleting virtual machine {0} - {1}".format(self.name, str(exc)))
-
- # TODO: parallelize nic, vhd, and public ip deletions with begin_deleting
- # TODO: best-effort to keep deleting other linked resources if we encounter an error
- if self.remove_on_absent.intersection(set(['all', 'virtual_storage'])):
- self.log('Deleting VHDs')
- self.delete_vm_storage(vhd_uris)
- self.log('Deleting managed disks')
- self.delete_managed_disks(managed_disk_ids)
-
- if 'all' in self.remove_on_absent or 'all_autocreated' in self.remove_on_absent:
- self.remove_autocreated_resources(vm.tags)
-
- if self.remove_on_absent.intersection(set(['all', 'network_interfaces'])):
- self.log('Deleting network interfaces')
- for nic_dict in nic_names:
- self.delete_nic(nic_dict['resource_group'], nic_dict['name'])
-
- if self.remove_on_absent.intersection(set(['all', 'public_ips'])):
- self.log('Deleting public IPs')
- for pip_dict in pip_names:
- self.delete_pip(pip_dict['resource_group'], pip_dict['name'])
-
- if 'all' in self.remove_on_absent or 'all_autocreated' in self.remove_on_absent:
- self.remove_autocreated_resources(vm.tags)
-
- return True
-
- def get_network_interface(self, resource_group, name):
- try:
- nic = self.network_client.network_interfaces.get(resource_group, name)
- return nic
- except Exception as exc:
- self.fail("Error fetching network interface {0} - {1}".format(name, str(exc)))
- return True
-
- def delete_nic(self, resource_group, name):
- self.log("Deleting network interface {0}".format(name))
- self.results['actions'].append("Deleted network interface {0}".format(name))
- try:
- poller = self.network_client.network_interfaces.delete(resource_group, name)
- except Exception as exc:
- self.fail("Error deleting network interface {0} - {1}".format(name, str(exc)))
- self.get_poller_result(poller)
- # Delete doesn't return anything. If we get this far, assume success
- return True
-
- def delete_pip(self, resource_group, name):
- self.results['actions'].append("Deleted public IP {0}".format(name))
- try:
- poller = self.network_client.public_ip_addresses.delete(resource_group, name)
- self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error deleting {0} - {1}".format(name, str(exc)))
- # Delete returns nada. If we get here, assume that all is well.
- return True
-
- def delete_nsg(self, resource_group, name):
- self.results['actions'].append("Deleted NSG {0}".format(name))
- try:
- poller = self.network_client.network_security_groups.delete(resource_group, name)
- self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error deleting {0} - {1}".format(name, str(exc)))
- return True
-
- def delete_managed_disks(self, managed_disk_ids):
- for mdi in managed_disk_ids:
- try:
- poller = self.rm_client.resources.delete_by_id(mdi, '2017-03-30')
- self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error deleting managed disk {0} - {1}".format(mdi, str(exc)))
- return True
-
- def delete_storage_account(self, resource_group, name):
- self.log("Delete storage account {0}".format(name))
- self.results['actions'].append("Deleted storage account {0}".format(name))
- try:
- self.storage_client.storage_accounts.delete(self.resource_group, name)
- except Exception as exc:
- self.fail("Error deleting storage account {0} - {1}".format(name, str(exc)))
- return True
-
- def delete_vm_storage(self, vhd_uris):
- # FUTURE: figure out a cloud_env independent way to delete these
- for uri in vhd_uris:
- self.log("Extracting info from blob uri '{0}'".format(uri))
- try:
- blob_parts = extract_names_from_blob_uri(uri, self._cloud_environment.suffixes.storage_endpoint)
- except Exception as exc:
- self.fail("Error parsing blob URI {0}".format(str(exc)))
- storage_account_name = blob_parts['accountname']
- container_name = blob_parts['containername']
- blob_name = blob_parts['blobname']
-
- blob_client = self.get_blob_client(self.resource_group, storage_account_name)
-
- self.log("Delete blob {0}:{1}".format(container_name, blob_name))
- self.results['actions'].append("Deleted blob {0}:{1}".format(container_name, blob_name))
- try:
- blob_client.delete_blob(container_name, blob_name)
- except Exception as exc:
- self.fail("Error deleting blob {0}:{1} - {2}".format(container_name, blob_name, str(exc)))
- return True
-
- def get_marketplace_image_version(self):
- try:
- versions = self.compute_client.virtual_machine_images.list(self.location,
- self.image['publisher'],
- self.image['offer'],
- self.image['sku'])
- except Exception as exc:
- self.fail("Error fetching image {0} {1} {2} - {3}".format(self.image['publisher'],
- self.image['offer'],
- self.image['sku'],
- str(exc)))
- if versions and len(versions) > 0:
- if self.image['version'] == 'latest':
- return versions[len(versions) - 1]
- for version in versions:
- if version.name == self.image['version']:
- return version
-
- self.fail("Error could not find image {0} {1} {2} {3}".format(self.image['publisher'],
- self.image['offer'],
- self.image['sku'],
- self.image['version']))
- return None
-
- def get_custom_image_reference(self, name, resource_group=None):
- try:
- if resource_group:
- vm_images = self.compute_client.images.list_by_resource_group(resource_group)
- else:
- vm_images = self.compute_client.images.list()
- except Exception as exc:
- self.fail("Error fetching custom images from subscription - {0}".format(str(exc)))
-
- for vm_image in vm_images:
- if vm_image.name == name:
- self.log("Using custom image id {0}".format(vm_image.id))
- return self.compute_models.ImageReference(id=vm_image.id)
-
- self.fail("Error could not find image with name {0}".format(name))
- return None
-
- def get_availability_set(self, resource_group, name):
- try:
- return self.compute_client.availability_sets.get(resource_group, name)
- except Exception as exc:
- self.fail("Error fetching availability set {0} - {1}".format(name, str(exc)))
-
- def get_storage_account(self, name):
- try:
- account = self.storage_client.storage_accounts.get_properties(self.resource_group,
- name)
- return account
- except Exception as exc:
- self.fail("Error fetching storage account {0} - {1}".format(name, str(exc)))
-
- def create_or_update_vm(self, params, remove_autocreated_on_failure):
- try:
- poller = self.compute_client.virtual_machines.create_or_update(self.resource_group, self.name, params)
- self.get_poller_result(poller)
- except Exception as exc:
- if remove_autocreated_on_failure:
- self.remove_autocreated_resources(params.tags)
- self.fail("Error creating or updating virtual machine {0} - {1}".format(self.name, str(exc)))
-
- def vm_size_is_valid(self):
- '''
- Validate self.vm_size against the list of virtual machine sizes available for the account and location.
-
- :return: boolean
- '''
- try:
- sizes = self.compute_client.virtual_machine_sizes.list(self.location)
- except Exception as exc:
- self.fail("Error retrieving available machine sizes - {0}".format(str(exc)))
- for size in sizes:
- if size.name == self.vm_size:
- return True
- return False
-
- def create_default_storage_account(self, vm_dict=None):
- '''
- Create (once) a default storage account <vm name>XXXX, where XXXX is a random number.
- NOTE: If <vm name>XXXX exists, use it instead of failing. Highly unlikely.
- If this method is called multiple times across executions it will return the same
- storage account created with the random name which is stored in a tag on the VM.
-
- vm_dict is passed in during an update, so we can obtain the _own_sa_ tag and return
- the default storage account we created in a previous invocation.
-
- :return: storage account object
- '''
- account = None
- valid_name = False
- if self.tags is None:
- self.tags = {}
-
- if self.tags.get('_own_sa_', None):
- # We previously created one in the same invocation
- return self.get_storage_account(self.tags['_own_sa_'])
-
- if vm_dict and vm_dict.get('tags', {}).get('_own_sa_', None):
- # We previously created one in a previous invocation
- # We must be updating, like adding boot diagnostics
- return self.get_storage_account(vm_dict['tags']['_own_sa_'])
-
- # Attempt to find a valid storage account name
- storage_account_name_base = re.sub('[^a-zA-Z0-9]', '', self.name[:20].lower())
- for i in range(0, 5):
- rand = random.randrange(1000, 9999)
- storage_account_name = storage_account_name_base + str(rand)
- if self.check_storage_account_name(storage_account_name):
- valid_name = True
- break
-
- if not valid_name:
- self.fail("Failed to create a unique storage account name for {0}. Try using a different VM name."
- .format(self.name))
-
- try:
- account = self.storage_client.storage_accounts.get_properties(self.resource_group, storage_account_name)
- except CloudError:
- pass
-
- if account:
- self.log("Storage account {0} found.".format(storage_account_name))
- self.check_provisioning_state(account)
- return account
- sku = self.storage_models.Sku(name=self.storage_models.SkuName.standard_lrs)
- sku.tier = self.storage_models.SkuTier.standard
- kind = self.storage_models.Kind.storage
- parameters = self.storage_models.StorageAccountCreateParameters(sku=sku, kind=kind, location=self.location)
- self.log("Creating storage account {0} in location {1}".format(storage_account_name, self.location))
- self.results['actions'].append("Created storage account {0}".format(storage_account_name))
- try:
- poller = self.storage_client.storage_accounts.create(self.resource_group, storage_account_name, parameters)
- self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Failed to create storage account: {0} - {1}".format(storage_account_name, str(exc)))
- self.tags['_own_sa_'] = storage_account_name
- return self.get_storage_account(storage_account_name)
-
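
# Illustrative sketch (not part of the removed module): the default storage
# account naming scheme used by create_default_storage_account() above, as a
# standalone helper. The helper name is hypothetical; only the standard
# library is assumed.
import random
import re

def default_storage_account_name(vm_name):
    # Strip non-alphanumerics from the first 20 characters of the lower-cased
    # VM name, then append a random four-digit suffix.
    base = re.sub('[^a-zA-Z0-9]', '', vm_name[:20].lower())
    return base + str(random.randrange(1000, 9999))

# Example: default_storage_account_name('my-VM_01') -> e.g. 'myvm013728'
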
- def check_storage_account_name(self, name):
- self.log("Checking storage account name availability for {0}".format(name))
- try:
- response = self.storage_client.storage_accounts.check_name_availability(name)
- if response.reason == 'AccountNameInvalid':
- raise Exception("Invalid default storage account name: {0}".format(name))
- except Exception as exc:
- self.fail("Error checking storage account name availability for {0} - {1}".format(name, str(exc)))
-
- return response.name_available
-
- def create_default_nic(self):
- '''
- Create a default Network Interface <vm name>01. Requires an existing virtual network
- with one subnet. If NIC <vm name>01 exists, use it. Otherwise, create one.
-
- :return: NIC object
- '''
-
- network_interface_name = self.name + '01'
- nic = None
- if self.tags is None:
- self.tags = {}
-
- self.log("Create default NIC {0}".format(network_interface_name))
- self.log("Check to see if NIC {0} exists".format(network_interface_name))
- try:
- nic = self.network_client.network_interfaces.get(self.resource_group, network_interface_name)
- except CloudError:
- pass
-
- if nic:
- self.log("NIC {0} found.".format(network_interface_name))
- self.check_provisioning_state(nic)
- return nic
-
- self.log("NIC {0} does not exist.".format(network_interface_name))
-
- virtual_network_resource_group = None
- if self.virtual_network_resource_group:
- virtual_network_resource_group = self.virtual_network_resource_group
- else:
- virtual_network_resource_group = self.resource_group
-
- if self.virtual_network_name:
- try:
- self.network_client.virtual_networks.list(virtual_network_resource_group, self.virtual_network_name)
- virtual_network_name = self.virtual_network_name
- except CloudError as exc:
- self.fail("Error: fetching virtual network {0} - {1}".format(self.virtual_network_name, str(exc)))
-
- else:
- # Find a virtual network
- no_vnets_msg = "Error: unable to find virtual network in resource group {0}. A virtual network " \
- "with at least one subnet must exist in order to create a NIC for the virtual " \
- "machine.".format(virtual_network_resource_group)
-
- virtual_network_name = None
- try:
- vnets = self.network_client.virtual_networks.list(virtual_network_resource_group)
- except CloudError:
- self.log('cloud error!')
- self.fail(no_vnets_msg)
-
- for vnet in vnets:
- virtual_network_name = vnet.name
- self.log('vnet name: {0}'.format(vnet.name))
- break
-
- if not virtual_network_name:
- self.fail(no_vnets_msg)
-
- if self.subnet_name:
- try:
- subnet = self.network_client.subnets.get(virtual_network_resource_group, virtual_network_name, self.subnet_name)
- subnet_id = subnet.id
- except Exception as exc:
- self.fail("Error: fetching subnet {0} - {1}".format(self.subnet_name, str(exc)))
- else:
- no_subnets_msg = "Error: unable to find a subnet in virtual network {0}. A virtual network " \
- "with at least one subnet must exist in order to create a NIC for the virtual " \
- "machine.".format(virtual_network_name)
-
- subnet_id = None
- try:
- subnets = self.network_client.subnets.list(virtual_network_resource_group, virtual_network_name)
- except CloudError:
- self.fail(no_subnets_msg)
-
- for subnet in subnets:
- subnet_id = subnet.id
- self.log('subnet id: {0}'.format(subnet_id))
- break
-
- if not subnet_id:
- self.fail(no_subnets_msg)
-
- pip = None
- if self.public_ip_allocation_method != 'Disabled':
- self.results['actions'].append('Created default public IP {0}'.format(self.name + '01'))
- sku = self.network_models.PublicIPAddressSku(name="Standard") if self.zones else None
- pip_facts = self.create_default_pip(self.resource_group, self.location, self.name + '01', self.public_ip_allocation_method, sku=sku)
- pip = self.network_models.PublicIPAddress(id=pip_facts.id, location=pip_facts.location, resource_guid=pip_facts.resource_guid, sku=sku)
- self.tags['_own_pip_'] = self.name + '01'
-
- self.results['actions'].append('Created default security group {0}'.format(self.name + '01'))
- group = self.create_default_securitygroup(self.resource_group, self.location, self.name + '01', self.os_type,
- self.open_ports)
- self.tags['_own_nsg_'] = self.name + '01'
-
- parameters = self.network_models.NetworkInterface(
- location=self.location,
- ip_configurations=[
- self.network_models.NetworkInterfaceIPConfiguration(
- private_ip_allocation_method='Dynamic',
- )
- ]
- )
- parameters.ip_configurations[0].subnet = self.network_models.Subnet(id=subnet_id)
- parameters.ip_configurations[0].name = 'default'
- parameters.network_security_group = self.network_models.NetworkSecurityGroup(id=group.id,
- location=group.location,
- resource_guid=group.resource_guid)
- parameters.ip_configurations[0].public_ip_address = pip
-
- self.log("Creating NIC {0}".format(network_interface_name))
- self.log(self.serialize_obj(parameters, 'NetworkInterface'), pretty_print=True)
- self.results['actions'].append("Created NIC {0}".format(network_interface_name))
- try:
- poller = self.network_client.network_interfaces.create_or_update(self.resource_group,
- network_interface_name,
- parameters)
- new_nic = self.get_poller_result(poller)
- self.tags['_own_nic_'] = network_interface_name
- except Exception as exc:
- self.fail("Error creating network interface {0} - {1}".format(network_interface_name, str(exc)))
- return new_nic
-
- def parse_network_interface(self, nic):
- nic = self.parse_resource_to_dict(nic)
- if 'name' not in nic:
- self.fail("Invalid network interface {0}".format(str(nic)))
- return format_resource_id(val=nic['name'],
- subscription_id=nic['subscription_id'],
- resource_group=nic['resource_group'],
- namespace='Microsoft.Network',
- types='networkInterfaces')
-
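
# Illustrative sketch (not part of the removed module): the kind of resource ID
# that parse_network_interface() above produces for a bare NIC name, built here
# by hand instead of via format_resource_id(). All values are placeholders.
def nic_resource_id(subscription_id, resource_group, nic_name):
    return ('/subscriptions/{0}/resourceGroups/{1}'
            '/providers/Microsoft.Network/networkInterfaces/{2}').format(
                subscription_id, resource_group, nic_name)

# Example:
#   nic_resource_id('xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx', 'myResourceGroup', 'myNic01')
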
-
-def main():
- AzureRMVirtualMachine()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachine_info.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualmachine_info.py
deleted file mode 100644
index f6cb8ec906..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachine_info.py
+++ /dev/null
@@ -1,456 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018
-# Gustavo Muniz do Carmo <gustavo@esign.com.br>
-# Zim Kalinowski <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_virtualmachine_info
-
-version_added: "2.9"
-
-short_description: Get virtual machine facts
-
-description:
- - Get facts for one or all virtual machines in a resource group.
-
-options:
- resource_group:
- description:
- - Name of the resource group containing the virtual machines (required when filtering by vm name).
- name:
- description:
- - Name of the virtual machine.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Gustavo Muniz do Carmo (@gustavomcarmo)
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get facts for all virtual machines of a resource group
- azure_rm_virtualmachine_info:
- resource_group: myResourceGroup
-
- - name: Get facts by name
- azure_rm_virtualmachine_info:
- resource_group: myResourceGroup
- name: myVm
-
- - name: Get facts by tags
- azure_rm_virtualmachine_info:
- resource_group: myResourceGroup
- tags:
- - testing
- - foo:bar
-'''
-
-RETURN = '''
-vms:
- description:
- - List of virtual machines.
- returned: always
- type: complex
- contains:
- admin_username:
- description:
- - Administrator user name.
- returned: always
- type: str
- sample: admin
- boot_diagnostics:
- description:
- - Information about the boot diagnostics settings.
- returned: always
- type: complex
- contains:
- enabled:
- description:
- - Indicates if boot diagnostics are enabled.
- returned: always
- type: bool
- sample: true
- storage_uri:
- description:
- - Indicates the storage account used by boot diagnostics.
- returned: always
- type: str
- sample: https://mystorageaccountname.blob.core.windows.net/
- console_screenshot_uri:
- description:
- - Contains a URI to grab a console screenshot.
- - Only present if enabled.
- returned: always
- type: str
- sample: https://mystorageaccountname.blob.core.windows.net/bootdiagnostics-myvm01-a4db09a6-ab7f-4d80-9da8-fbceaef9288a/
- myVm.a4db09a6-ab7f-4d80-9da8-fbceaef9288a.screenshot.bmp
- serial_console_log_uri:
- description:
- - Contains a URI to grab the serial console log.
- - Only present if enabled.
- returned: always
- type: str
- sample: https://mystorageaccountname.blob.core.windows.net/bootdiagnostics-myvm01-a4db09a6-ab7f-4d80-9da8-fbceaef9288a/
- myVm.a4db09a6-ab7f-4d80-9da8-fbceaef9288a.serialconsole.log
- data_disks:
- description:
- - List of attached data disks.
- returned: always
- type: complex
- contains:
- caching:
- description:
- - Type of data disk caching.
- returned: always
- type: str
- sample: ReadOnly
- disk_size_gb:
- description:
- - The initial disk size in GB for blank data disks.
- returned: always
- type: int
- sample: 64
- lun:
- description:
- - The logical unit number for data disk.
- returned: always
- type: int
- sample: 0
- managed_disk_type:
- description:
- - Managed data disk type.
- returned: always
- type: str
- sample: Standard_LRS
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachines/myVm
- image:
- description:
- - Image specification.
- returned: always
- type: complex
- contains:
- offer:
- description:
- - The offer of the platform image or marketplace image used to create the virtual machine.
- type: str
- returned: when created from marketplace image
- sample: RHEL
- publisher:
- description:
- - Publisher name.
- type: str
- returned: when created from marketplace image
- sample: RedHat
- sku:
- description:
- - SKU name.
- type: str
- returned: when created from marketplace image
- sample: 7-RAW
- version:
- description:
- - Image version.
- type: str
- returned: when created from marketplace image
- sample: 7.5.2018050901
- id:
- description:
- - Custom image resource ID.
- type: str
- returned: when created from custom image
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/images/myImage
- location:
- description:
- - Resource location.
- returned: always
- type: str
- sample: japaneast
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: myVm
- network_interface_names:
- description:
- - List of attached network interfaces.
- returned: always
- type: list
- sample: [
- "myNetworkInterface"
- ]
- os_disk_caching:
- description:
- - Type of OS disk caching.
- returned: always
- type: str
- sample: ReadOnly
- os_type:
- description:
- - Base type of operating system.
- returned: always
- type: str
- sample: Linux
- resource_group:
- description:
- - Resource group.
- returned: always
- type: str
- sample: myResourceGroup
- state:
- description:
- - State of the resource.
- returned: always
- type: str
- sample: present
- tags:
- description:
- - Resource tags.
- returned: always
- type: dict
- sample: { "key1":"value1" }
- vm_size:
- description:
- - Virtual machine size.
- returned: always
- type: str
- sample: Standard_D4
- power_state:
- description:
- - Power state of the virtual machine.
- returned: always
- type: str
- sample: running
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.tools import parse_resource_id
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-from ansible.module_utils.six.moves.urllib.parse import urlparse
-import re
-
-
-AZURE_OBJECT_CLASS = 'VirtualMachine'
-
-AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
-
-
-class AzureRMVirtualMachineInfo(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str'),
- name=dict(type='str'),
- tags=dict(type='list')
- )
-
- self.results = dict(
- changed=False,
- vms=[]
- )
-
- self.resource_group = None
- self.name = None
- self.tags = None
-
- super(AzureRMVirtualMachineInfo, self).__init__(self.module_arg_spec,
- supports_tags=False,
- facts_module=True)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_virtualmachine_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_virtualmachine_facts' module has been renamed to 'azure_rm_virtualmachine_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name and not self.resource_group:
- self.fail("Parameter error: resource group required when filtering by name.")
- if self.name:
- self.results['vms'] = self.get_item()
- elif self.resource_group:
- self.results['vms'] = self.list_items_by_resourcegroup()
- else:
- self.results['vms'] = self.list_all_items()
-
- return self.results
-
- def get_item(self):
- self.log('Get properties for {0}'.format(self.name))
- item = None
- result = []
-
- item = self.get_vm(self.resource_group, self.name)
-
- if item and self.has_tags(item.get('tags'), self.tags):
- result = [item]
-
- return result
-
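
# Illustrative sketch (not part of the removed module): the 'key' / 'key:value'
# tag filter described in the documentation above. has_tags() itself lives in
# AzureRMModuleBase; this hypothetical helper only mirrors the documented
# behaviour and is not its actual implementation.
def matches_tag_filter(resource_tags, wanted_tags):
    resource_tags = resource_tags or {}
    for entry in wanted_tags or []:
        key, _, value = entry.partition(':')
        if key not in resource_tags:
            return False
        if value and str(resource_tags[key]) != value:
            return False
    return True

# Example: matches_tag_filter({'env': 'prod'}, ['env:prod', 'env'])  -> True
#          matches_tag_filter({'env': 'prod'}, ['env:dev'])          -> False
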
- def list_items_by_resourcegroup(self):
- self.log('List all items')
- try:
- items = self.compute_client.virtual_machines.list(self.resource_group)
- except CloudError as exc:
- self.fail("Failed to list all items - {0}".format(str(exc)))
-
- results = []
- for item in items:
- if self.has_tags(item.tags, self.tags):
- results.append(self.get_vm(self.resource_group, item.name))
- return results
-
- def list_all_items(self):
- self.log('List all items')
- try:
- items = self.compute_client.virtual_machines.list_all()
- except CloudError as exc:
- self.fail("Failed to list all items - {0}".format(str(exc)))
-
- results = []
- for item in items:
- if self.has_tags(item.tags, self.tags):
- results.append(self.get_vm(parse_resource_id(item.id).get('resource_group'), item.name))
- return results
-
- def get_vm(self, resource_group, name):
- '''
- Get the VM with expanded instanceView
-
- :return: VirtualMachine object
- '''
- try:
- vm = self.compute_client.virtual_machines.get(resource_group, name, expand='instanceview')
- return self.serialize_vm(vm)
- except Exception as exc:
- self.fail("Error getting virtual machine {0} - {1}".format(self.name, str(exc)))
-
- def serialize_vm(self, vm):
- '''
- Convert a VirtualMachine object to dict.
-
- :param vm: VirtualMachine object
- :return: dict
- '''
-
- result = self.serialize_obj(vm, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)
- resource_group = parse_resource_id(result['id']).get('resource_group')
- instance = None
- power_state = None
-
- try:
- instance = self.compute_client.virtual_machines.instance_view(resource_group, vm.name)
- instance = self.serialize_obj(instance, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)
- except Exception as exc:
- self.fail("Error getting virtual machine {0} instance view - {1}".format(vm.name, str(exc)))
-
- for index in range(len(instance['statuses'])):
- code = instance['statuses'][index]['code'].split('/')
- if code[0] == 'PowerState':
- power_state = code[1]
- elif code[0] == 'OSState' and code[1] == 'generalized':
- power_state = 'generalized'
- break
-
- new_result = {}
- new_result['power_state'] = power_state
- new_result['id'] = vm.id
- new_result['resource_group'] = resource_group
- new_result['name'] = vm.name
- new_result['state'] = 'present'
- new_result['location'] = vm.location
- new_result['vm_size'] = result['properties']['hardwareProfile']['vmSize']
- os_profile = result['properties'].get('osProfile')
- if os_profile is not None:
- new_result['admin_username'] = os_profile.get('adminUsername')
- image = result['properties']['storageProfile'].get('imageReference')
- if image is not None:
- if image.get('publisher', None) is not None:
- new_result['image'] = {
- 'publisher': image['publisher'],
- 'sku': image['sku'],
- 'offer': image['offer'],
- 'version': image['version']
- }
- else:
- new_result['image'] = {
- 'id': image.get('id', None)
- }
-
- new_result['boot_diagnostics'] = {
- 'enabled': 'diagnosticsProfile' in result['properties'] and
- 'bootDiagnostics' in result['properties']['diagnosticsProfile'] and
- result['properties']['diagnosticsProfile']['bootDiagnostics']['enabled'] or False,
- 'storage_uri': 'diagnosticsProfile' in result['properties'] and
- 'bootDiagnostics' in result['properties']['diagnosticsProfile'] and
- result['properties']['diagnosticsProfile']['bootDiagnostics']['storageUri'] or None
- }
- if new_result['boot_diagnostics']['enabled']:
- new_result['boot_diagnostics']['console_screenshot_uri'] = result['properties']['instanceView']['bootDiagnostics']['consoleScreenshotBlobUri']
- new_result['boot_diagnostics']['serial_console_log_uri'] = result['properties']['instanceView']['bootDiagnostics']['serialConsoleLogBlobUri']
-
- vhd = result['properties']['storageProfile']['osDisk'].get('vhd')
- if vhd is not None:
- url = urlparse(vhd['uri'])
- new_result['storage_account_name'] = url.netloc.split('.')[0]
- new_result['storage_container_name'] = url.path.split('/')[1]
- new_result['storage_blob_name'] = url.path.split('/')[-1]
-
- new_result['os_disk_caching'] = result['properties']['storageProfile']['osDisk']['caching']
- new_result['os_type'] = result['properties']['storageProfile']['osDisk']['osType']
- new_result['data_disks'] = []
- disks = result['properties']['storageProfile']['dataDisks']
- for disk_index in range(len(disks)):
- new_result['data_disks'].append({
- 'lun': disks[disk_index].get('lun'),
- 'disk_size_gb': disks[disk_index].get('diskSizeGB'),
- 'managed_disk_type': disks[disk_index].get('managedDisk', {}).get('storageAccountType'),
- 'caching': disks[disk_index].get('caching')
- })
-
- new_result['network_interface_names'] = []
- nics = result['properties']['networkProfile']['networkInterfaces']
- for nic_index in range(len(nics)):
- new_result['network_interface_names'].append(re.sub('.*networkInterfaces/', '', nics[nic_index]['id']))
-
- new_result['tags'] = vm.tags
- return new_result
-
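
# Illustrative sketch (not part of the removed module): how serialize_vm()
# above derives power_state from instance-view status codes of the form
# 'PowerState/<state>'. The sample statuses below are assumed.
statuses = [
    {'code': 'ProvisioningState/succeeded'},
    {'code': 'PowerState/running'},
]
power_state = None
for status in statuses:
    code = status['code'].split('/')
    if code[0] == 'PowerState':
        power_state = code[1]
    elif code[0] == 'OSState' and code[1] == 'generalized':
        power_state = 'generalized'
        break
# power_state == 'running'
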
-
-def main():
- AzureRMVirtualMachineInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachineextension.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualmachineextension.py
deleted file mode 100644
index 3ed97e98cf..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachineextension.py
+++ /dev/null
@@ -1,339 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2017 Sertac Ozercan <seozerca@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_virtualmachineextension
-
-version_added: "2.4"
-
-short_description: Manage Azure Virtual Machine extension
-
-description:
- - Create, update and delete Azure Virtual Machine Extension.
- - Note that this module was called M(azure_rm_virtualmachine_extension) before Ansible 2.8. The usage did not change.
-
-options:
- resource_group:
- description:
- - Name of a resource group where the vm extension exists or will be created.
- required: true
- name:
- description:
- - Name of the vm extension.
- required: true
- state:
- description:
- - State of the vm extension. Use C(present) to create or update a vm extension and C(absent) to delete a vm extension.
- default: present
- choices:
- - absent
- - present
- location:
- description:
- - Valid Azure location. Defaults to location of the resource group.
- virtual_machine_name:
- description:
-            - The name of the virtual machine where the extension should be created or updated.
- publisher:
- description:
- - The name of the extension handler publisher.
- virtual_machine_extension_type:
- description:
- - The type of the extension handler.
- type_handler_version:
- description:
- - The type version of the extension handler.
- settings:
- description:
- - Json formatted public settings for the extension.
- protected_settings:
- description:
- - Json formatted protected settings for the extension.
- auto_upgrade_minor_version:
- description:
- - Whether the extension handler should be automatically upgraded across minor versions.
- type: bool
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Sertac Ozercan (@sozercan)
- - Julien Stroheker (@julienstroheker)
-'''
-
-EXAMPLES = '''
- - name: Create VM Extension
- azure_rm_virtualmachineextension:
- name: myvmextension
- location: eastus
- resource_group: myResourceGroup
- virtual_machine_name: myvm
- publisher: Microsoft.Azure.Extensions
- virtual_machine_extension_type: CustomScript
- type_handler_version: 2.0
- settings: '{"commandToExecute": "hostname"}'
- auto_upgrade_minor_version: true
-
- - name: Delete VM Extension
- azure_rm_virtualmachineextension:
- name: myvmextension
- location: eastus
- resource_group: myResourceGroup
- virtual_machine_name: myvm
- state: absent
-'''
-
-RETURN = '''
-state:
- description:
- - Current state of the vm extension.
- returned: always
- type: dict
- sample: { "state":"Deleted" }
-
-changed:
- description:
- - Whether or not the resource has changed.
- returned: always
- type: bool
- sample: true
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-def vmextension_to_dict(extension):
- '''
-    Serialize the VM Extension returned by the API into a dict.
- :return: dict
- '''
- return dict(
- id=extension.id,
- name=extension.name,
- location=extension.location,
- publisher=extension.publisher,
- virtual_machine_extension_type=extension.virtual_machine_extension_type,
- type_handler_version=extension.type_handler_version,
- auto_upgrade_minor_version=extension.auto_upgrade_minor_version,
- settings=extension.settings,
- protected_settings=extension.protected_settings,
- )
-
-
-class AzureRMVMExtension(AzureRMModuleBase):
- """Configuration class for an Azure RM VM Extension resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- ),
- location=dict(
- type='str'
- ),
- virtual_machine_name=dict(
- type='str'
- ),
- publisher=dict(
- type='str'
- ),
- virtual_machine_extension_type=dict(
- type='str'
- ),
- type_handler_version=dict(
- type='str'
- ),
- auto_upgrade_minor_version=dict(
- type='bool'
- ),
- settings=dict(
- type='dict'
- ),
- protected_settings=dict(
- type='dict'
- )
- )
-
- self.resource_group = None
- self.name = None
- self.location = None
- self.publisher = None
- self.virtual_machine_extension_type = None
- self.type_handler_version = None
- self.auto_upgrade_minor_version = None
- self.settings = None
- self.protected_settings = None
- self.state = None
-
- required_if = [
- ('state', 'present', [
- 'publisher', 'virtual_machine_extension_type', 'type_handler_version'])
- ]
-
- self.results = dict(changed=False, state=dict())
-
- super(AzureRMVMExtension, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=False,
- supports_tags=False,
- required_if=required_if)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()):
- setattr(self, key, kwargs[key])
-
- if self.module._name == 'azure_rm_virtualmachine_extension':
- self.module.deprecate("The 'azure_rm_virtualmachine_extension' module has been renamed to 'azure_rm_virtualmachineextension'", version='2.12')
-
- resource_group = None
- response = None
- to_be_updated = False
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- if self.state == 'present':
- response = self.get_vmextension()
- if not response:
- to_be_updated = True
- else:
- if self.settings is not None:
- if response['settings'] != self.settings:
- response['settings'] = self.settings
- to_be_updated = True
- else:
- self.settings = response['settings']
-
- if self.protected_settings is not None:
- if response['protected_settings'] != self.protected_settings:
- response['protected_settings'] = self.protected_settings
- to_be_updated = True
- else:
- self.protected_settings = response['protected_settings']
-
- if response['location'] != self.location:
- self.location = response['location']
- self.module.warn("Property 'location' cannot be changed")
-
- if response['publisher'] != self.publisher:
- self.publisher = response['publisher']
- self.module.warn("Property 'publisher' cannot be changed")
-
- if response['virtual_machine_extension_type'] != self.virtual_machine_extension_type:
- self.virtual_machine_extension_type = response['virtual_machine_extension_type']
- self.module.warn("Property 'virtual_machine_extension_type' cannot be changed")
-
- if response['type_handler_version'] != self.type_handler_version:
- response['type_handler_version'] = self.type_handler_version
- to_be_updated = True
-
- if self.auto_upgrade_minor_version is not None:
- if response['auto_upgrade_minor_version'] != self.auto_upgrade_minor_version:
- response['auto_upgrade_minor_version'] = self.auto_upgrade_minor_version
- to_be_updated = True
- else:
- self.auto_upgrade_minor_version = response['auto_upgrade_minor_version']
-
- if to_be_updated:
- self.results['changed'] = True
- self.results['state'] = self.create_or_update_vmextension()
- elif self.state == 'absent':
- self.delete_vmextension()
- self.results['changed'] = True
-
- return self.results
-
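
# Illustrative sketch (not part of the removed module): the update-detection
# pattern used in exec_module() above, reduced to a single hypothetical helper.
# Only the properties the module treats as updatable are compared; location,
# publisher and extension type changes are warned about instead.
def extension_needs_update(current, desired):
    updatable = ('settings', 'protected_settings',
                 'type_handler_version', 'auto_upgrade_minor_version')
    return any(desired.get(key) is not None and current.get(key) != desired.get(key)
               for key in updatable)

# Example:
#   extension_needs_update({'settings': {'commandToExecute': 'hostname'}},
#                          {'settings': {'commandToExecute': 'uptime'}})  -> True
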
- def create_or_update_vmextension(self):
- '''
- Method calling the Azure SDK to create or update the VM extension.
-        :return: dict representing the created or updated VM extension
- '''
- self.log("Creating VM extension {0}".format(self.name))
- try:
- params = self.compute_models.VirtualMachineExtension(
- location=self.location,
- publisher=self.publisher,
- virtual_machine_extension_type=self.virtual_machine_extension_type,
- type_handler_version=self.type_handler_version,
- auto_upgrade_minor_version=self.auto_upgrade_minor_version,
- settings=self.settings,
- protected_settings=self.protected_settings
- )
- poller = self.compute_client.virtual_machine_extensions.create_or_update(self.resource_group, self.virtual_machine_name, self.name, params)
- response = self.get_poller_result(poller)
- return vmextension_to_dict(response)
-
- except CloudError as e:
- self.log('Error attempting to create the VM extension.')
- self.fail("Error creating the VM extension: {0}".format(str(e)))
-
- def delete_vmextension(self):
- '''
- Method calling the Azure SDK to delete the VM Extension.
- :return: void
- '''
- self.log("Deleting vmextension {0}".format(self.name))
- try:
- poller = self.compute_client.virtual_machine_extensions.delete(self.resource_group, self.virtual_machine_name, self.name)
- self.get_poller_result(poller)
- except CloudError as e:
- self.log('Error attempting to delete the vmextension.')
- self.fail("Error deleting the vmextension: {0}".format(str(e)))
-
- def get_vmextension(self):
- '''
- Method calling the Azure SDK to get a VM Extension.
-        :return: dict representing the VM extension if found, otherwise False
- '''
- self.log("Checking if the vm extension {0} is present".format(self.name))
- found = False
- try:
- response = self.compute_client.virtual_machine_extensions.get(self.resource_group, self.virtual_machine_name, self.name)
- found = True
- except CloudError as e:
- self.log('Did not find vm extension')
- if found:
- return vmextension_to_dict(response)
- else:
- return False
-
-
-def main():
- """Main execution"""
- AzureRMVMExtension()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachineextension_info.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualmachineextension_info.py
deleted file mode 100644
index de9d7975c2..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachineextension_info.py
+++ /dev/null
@@ -1,248 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_virtualmachineextension_info
-version_added: "2.9"
-short_description: Get Azure Virtual Machine Extension facts
-description:
- - Get facts of Azure Virtual Machine Extension.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- virtual_machine_name:
- description:
- - The name of the virtual machine containing the extension.
- required: True
- name:
- description:
- - The name of the virtual machine extension.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get information on specific Virtual Machine Extension
- azure_rm_virtualmachineextension_info:
- resource_group: myResourceGroup
- virtual_machine_name: myvm
- name: myextension
-
- - name: List installed Virtual Machine Extensions
- azure_rm_virtualmachineextension_info:
- resource_group: myResourceGroup
- virtual_machine_name: myvm
-'''
-
-RETURN = '''
-extensions:
- description:
- - A list of dictionaries containing facts for Virtual Machine Extension.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/myvm/testVM/extens
- ions/myextension"
- resource_group:
- description:
- - Resource group name.
- returned: always
- type: str
- sample: myResourceGroup
- virtual_machine_name:
- description:
- - Virtual machine name.
- returned: always
- type: str
- sample: myvm
- name:
- description:
-                - Virtual machine extension name.
- returned: always
- type: str
- sample: myextension
- location:
- description:
- - The resource location.
- returned: always
- type: str
- sample: eastus
- publisher:
- description:
- - Extension publisher.
- returned: always
- type: str
- sample: Microsoft.Azure.Extensions
- type:
- description:
- - Extension type.
- returned: always
- type: str
- sample: CustomScript
- settings:
- description:
- - Extension specific settings dictionary.
- returned: always
- type: dict
- sample: { 'commandToExecute':'hostname' }
- auto_upgrade_minor_version:
- description:
- - Autoupgrade minor version flag.
- returned: always
- type: bool
- sample: true
- tags:
- description:
- - Resource tags.
- returned: always
- type: dict
- sample: { "mytag":"abc" }
- provisioning_state:
- description:
- - Provisioning state of the extension.
- returned: always
- type: str
- sample: Succeeded
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMVirtualMachineExtensionInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- virtual_machine_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.resource_group = None
- self.virtual_machine_name = None
- self.name = None
- self.tags = None
- super(AzureRMVirtualMachineExtensionInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_virtualmachineextension_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_virtualmachineextension_facts' module has been renamed to 'azure_rm_virtualmachineextension_info'",
- version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name is not None:
- self.results['extensions'] = self.get_extensions()
- else:
- self.results['extensions'] = self.list_extensions()
-
- return self.results
-
- def get_extensions(self):
- response = None
- results = []
- try:
- response = self.compute_client.virtual_machine_extensions.get(resource_group_name=self.resource_group,
- vm_name=self.virtual_machine_name,
- vm_extension_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Virtual Machine Extension.')
-
- if response and self.has_tags(response.tags, self.tags):
- results.append(self.format_response(response))
-
- return results
-
- def list_extensions(self):
- response = None
- results = []
- try:
- response = self.compute_client.virtual_machine_extensions.list(resource_group_name=self.resource_group,
- vm_name=self.virtual_machine_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Virtual Machine Extension.')
-
- if response is not None and response.value is not None:
- for item in response.value:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_response(item))
-
- return results
-
- def format_response(self, item):
- d = item.as_dict()
- d = {
- 'id': d.get('id', None),
- 'resource_group': self.resource_group,
- 'virtual_machine_name': self.virtual_machine_name,
- 'location': d.get('location'),
- 'name': d.get('name'),
- 'publisher': d.get('publisher'),
- 'type': d.get('virtual_machine_extension_type'),
- 'settings': d.get('settings'),
- 'auto_upgrade_minor_version': d.get('auto_upgrade_minor_version'),
- 'tags': d.get('tags', None),
- 'provisioning_state': d.get('provisioning_state')
- }
- return d
-
-
-def main():
- AzureRMVirtualMachineExtensionInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachineimage_info.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualmachineimage_info.py
deleted file mode 100644
index 5221273184..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachineimage_info.py
+++ /dev/null
@@ -1,258 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_virtualmachineimage_info
-
-version_added: "2.9"
-
-short_description: Get virtual machine image facts
-
-description:
- - Get facts for virtual machine images.
-
-options:
- location:
- description:
- - Azure location value, for example C(westus), C(eastus), C(eastus2), C(northcentralus), etc.
- - Supplying only a location value will yield a list of available publishers for the location.
- required: true
- publisher:
- description:
- - Name of an image publisher. List image offerings associated with a particular publisher.
- offer:
- description:
- - Name of an image offering. Combine with SKU to see a list of available image versions.
- sku:
- description:
- - Image offering SKU. Combine with offer to see a list of available versions.
- version:
- description:
- - Specific version number of an image.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Chris Houseknecht (@chouseknecht)
- - Matt Davis (@nitzmahone)
-
-'''
-
-EXAMPLES = '''
- - name: Get facts for a specific image
- azure_rm_virtualmachineimage_info:
- location: eastus
- publisher: OpenLogic
- offer: CentOS
- sku: '7.1'
- version: '7.1.20160308'
-
- - name: List available versions
- azure_rm_virtualmachineimage_info:
- location: eastus
- publisher: OpenLogic
- offer: CentOS
- sku: '7.1'
-
- - name: List available offers
- azure_rm_virtualmachineimage_info:
- location: eastus
- publisher: OpenLogic
-
- - name: List available publishers
- azure_rm_virtualmachineimage_info:
- location: eastus
-
-'''
-
-RETURN = '''
-azure_vmimages:
- description:
- - List of image dicts.
- returned: always
- type: list
- example: [ {
- "id": "/Subscriptions/xxx...xxx/Providers/Microsoft.Compute/Locations/eastus/
- Publishers/OpenLogic/ArtifactTypes/VMImage/Offers/CentOS/Skus/7.1/Versions/7.1.20150410",
- "location": "eastus",
- "name": "7.1.20150410"
- },
- {
- "id": "/Subscriptions/xxx...xxx/Providers/Microsoft.Compute/Locations/eastus/
- Publishers/OpenLogic/ArtifactTypes/VMImage/Offers/CentOS/Skus/7.1/Versions/7.1.20150605",
- "location": "eastus",
- "name": "7.1.20150605"
- },
- {
- "id": "/Subscriptions/xxx...xxx/Providers/Microsoft.Compute/Locations/eastus/
- Publishers/OpenLogic/ArtifactTypes/VMImage/Offers/CentOS/Skus/7.1/Versions/7.1.20150731",
- "location": "eastus",
- "name": "7.1.20150731"
- },
- {
- "id": "/Subscriptions/xxx...xxx/Providers/Microsoft.Compute/Locations/eastus/
- Publishers/OpenLogic/ArtifactTypes/VMImage/Offers/CentOS/Skus/7.1/Versions/7.1.20160308",
- "location": "eastus",
- "name": "7.1.20160308"
- }
- ]
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-
-AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
-
-
-class AzureRMVirtualMachineImageInfo(AzureRMModuleBase):
-
- def __init__(self, **kwargs):
-
- self.module_arg_spec = dict(
- location=dict(type='str', required=True),
- publisher=dict(type='str'),
- offer=dict(type='str'),
- sku=dict(type='str'),
- version=dict(type='str')
- )
-
- self.results = dict(
- changed=False,
- )
-
- self.location = None
- self.publisher = None
- self.offer = None
- self.sku = None
- self.version = None
-
- super(AzureRMVirtualMachineImageInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_virtualmachineimage_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_virtualmachineimage_facts' module has been renamed to 'azure_rm_virtualmachineimage_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if is_old_facts:
- self.results['ansible_facts'] = dict()
- if self.location and self.publisher and self.offer and self.sku and self.version:
- self.results['ansible_facts']['azure_vmimages'] = self.get_item()
- elif self.location and self.publisher and self.offer and self.sku:
- self.results['ansible_facts']['azure_vmimages'] = self.list_images()
- elif self.location and self.publisher:
- self.results['ansible_facts']['azure_vmimages'] = self.list_offers()
- elif self.location:
- self.results['ansible_facts']['azure_vmimages'] = self.list_publishers()
- else:
- if self.location and self.publisher and self.offer and self.sku and self.version:
- self.results['vmimages'] = self.get_item()
- elif self.location and self.publisher and self.offer and self.sku:
- self.results['vmimages'] = self.list_images()
- elif self.location and self.publisher:
- self.results['vmimages'] = self.list_offers()
- elif self.location:
- self.results['vmimages'] = self.list_publishers()
-
- return self.results
-
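
# Illustrative sketch (not part of the removed module): how the parameter
# combinations documented above map onto the query methods defined below.
# location is always required; the helper name is hypothetical.
def pick_image_query(publisher=None, offer=None, sku=None, version=None):
    if publisher and offer and sku and version:
        return 'get_item'          # one specific image version
    if publisher and offer and sku:
        return 'list_images'       # available versions for an offer/SKU
    if publisher:
        return 'list_offers'       # offerings for a publisher
    return 'list_publishers'       # publishers available in the location

# Example: pick_image_query(publisher='OpenLogic') -> 'list_offers'
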
- def get_item(self):
- item = None
- result = []
-
- try:
- item = self.compute_client.virtual_machine_images.get(self.location,
- self.publisher,
- self.offer,
- self.sku,
- self.version)
- except CloudError:
- pass
-
- if item:
- result = [self.serialize_obj(item, 'VirtualMachineImage', enum_modules=AZURE_ENUM_MODULES)]
-
- return result
-
- def list_images(self):
- response = None
- results = []
- try:
- response = self.compute_client.virtual_machine_images.list(self.location,
- self.publisher,
- self.offer,
- self.sku,)
- except CloudError:
- pass
- except Exception as exc:
- self.fail("Failed to list images: {0}".format(str(exc)))
-
- if response:
- for item in response:
- results.append(self.serialize_obj(item, 'VirtualMachineImageResource',
- enum_modules=AZURE_ENUM_MODULES))
- return results
-
- def list_offers(self):
- response = None
- results = []
- try:
- response = self.compute_client.virtual_machine_images.list_offers(self.location,
- self.publisher)
- except CloudError:
- pass
- except Exception as exc:
- self.fail("Failed to list offers: {0}".format(str(exc)))
-
- if response:
- for item in response:
- results.append(self.serialize_obj(item, 'VirtualMachineImageResource',
- enum_modules=AZURE_ENUM_MODULES))
- return results
-
- def list_publishers(self):
- response = None
- results = []
- try:
- response = self.compute_client.virtual_machine_images.list_publishers(self.location)
- except CloudError:
- pass
- except Exception as exc:
- self.fail("Failed to list publishers: {0}".format(str(exc)))
-
- if response:
- for item in response:
- results.append(self.serialize_obj(item, 'VirtualMachineImageResource',
- enum_modules=AZURE_ENUM_MODULES))
- return results
-
-
-def main():
- AzureRMVirtualMachineImageInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset.py
deleted file mode 100644
index 73be75bba8..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset.py
+++ /dev/null
@@ -1,1254 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Sertac Ozercan, <seozerca@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_virtualmachinescaleset
-
-version_added: "2.4"
-
-short_description: Manage Azure virtual machine scale sets
-
-description:
-    - Create, update and delete a virtual machine scale set.
- - Note that this module was called M(azure_rm_virtualmachine_scaleset) before Ansible 2.8. The usage did not change.
-
-options:
- resource_group:
- description:
- - Name of the resource group containing the virtual machine scale set.
- required: true
- name:
- description:
- - Name of the virtual machine.
- required: true
- state:
- description:
- - Assert the state of the virtual machine scale set.
- - State C(present) will check that the machine exists with the requested configuration. If the configuration
- of the existing machine does not match, the machine will be updated.
- - State C(absent) will remove the virtual machine scale set.
- default: present
- choices:
- - absent
- - present
- location:
- description:
- - Valid Azure location. Defaults to location of the resource group.
- short_hostname:
- description:
- - Short host name.
- vm_size:
- description:
- - A valid Azure VM size value. For example, C(Standard_D4).
- - The list of choices varies depending on the subscription and location. Check your subscription for available choices.
- capacity:
- description:
- - Capacity of VMSS.
- default: 1
- tier:
- description:
- - SKU Tier.
- choices:
- - Basic
- - Standard
- upgrade_policy:
- description:
- - Upgrade policy.
- - Required when creating the Azure virtual machine scale sets.
- choices:
- - Manual
- - Automatic
- admin_username:
- description:
- - Admin username used to access the host after it is created. Required when creating a VM.
- admin_password:
- description:
- - Password for the admin username.
- - Not required if the os_type is Linux and SSH password authentication is disabled by setting I(ssh_password_enabled=false).
- ssh_password_enabled:
- description:
- - When the os_type is Linux, setting I(ssh_password_enabled=false) will disable SSH password authentication and require use of SSH keys.
- type: bool
- default: true
- ssh_public_keys:
- description:
- - For I(os_type=Linux) provide a list of SSH keys.
- - Each item in the list should be a dictionary where the dictionary contains two keys, C(path) and C(key_data).
- - Set the C(path) to the default location of the authorized_keys files.
- - On an Enterprise Linux host, for example, the I(path=/home/<admin username>/.ssh/authorized_keys).
- Set C(key_data) to the actual value of the public key.
- image:
- description:
- - Specifies the image used to build the VM.
- - If a string, the image is sourced from a custom image based on the name.
- - If a dict with the keys I(publisher), I(offer), I(sku), and I(version), the image is sourced from a Marketplace image.
-              Note that setting I(version=latest) selects the most recent version of a given image.
- - If a dict with the keys I(name) and I(resource_group), the image is sourced from a custom image based on the I(name) and I(resource_group) set.
- Note that the key I(resource_group) is optional and if omitted, all images in the subscription will be searched for by I(name).
- - Custom image support was added in Ansible 2.5.
- required: true
- os_disk_caching:
- description:
- - Type of OS disk caching.
- choices:
- - ReadOnly
- - ReadWrite
- default: ReadOnly
- aliases:
- - disk_caching
- os_type:
- description:
- - Base type of operating system.
- choices:
- - Windows
- - Linux
- default: Linux
- managed_disk_type:
- description:
- - Managed disk type.
- choices:
- - Standard_LRS
- - Premium_LRS
- data_disks:
- description:
- - Describes list of data disks.
- version_added: "2.4"
- suboptions:
- lun:
- description:
- - The logical unit number for data disk.
- default: 0
- version_added: "2.4"
- disk_size_gb:
- description:
- - The initial disk size in GB for blank data disks.
- version_added: "2.4"
- managed_disk_type:
- description:
- - Managed data disk type.
- choices:
- - Standard_LRS
- - Premium_LRS
- version_added: "2.4"
- caching:
- description:
- - Type of data disk caching.
- choices:
- - ReadOnly
- - ReadWrite
- default: ReadOnly
- version_added: "2.4"
- create_option:
- description:
- - Specify whether disk should be created Empty or FromImage. This is required to allow custom
- images with data disks to be used.
- choices:
- - Empty
- - FromImage
- version_added: "2.10"
- virtual_network_resource_group:
- description:
-            - When creating a virtual machine, use this parameter to specify the resource group of a virtual network
-              that resides in a different resource group than the virtual machine.
- version_added: "2.5"
- virtual_network_name:
- description:
- - Virtual Network name.
- aliases:
- - virtual_network
- subnet_name:
- description:
- - Subnet name.
- aliases:
- - subnet
- load_balancer:
- description:
- - Load balancer name.
- version_added: "2.5"
- application_gateway:
- description:
- - Application gateway name.
- version_added: "2.8"
- remove_on_absent:
- description:
-            - When removing a VMSS using I(state=absent), also remove associated resources.
- - It can be C(all) or a list with any of the following ['network_interfaces', 'virtual_storage', 'public_ips'].
- - Any other input will be ignored.
- default: ['all']
- enable_accelerated_networking:
- description:
-            - Indicates whether to enable accelerated networking for the virtual machines in the scale set being created.
- version_added: "2.7"
- type: bool
- security_group:
- description:
- - Existing security group with which to associate the subnet.
- - It can be the security group name which is in the same resource group.
- - It can be the resource ID.
- - It can be a dict which contains I(name) and I(resource_group) of the security group.
- version_added: "2.7"
- aliases:
- - security_group_name
- overprovision:
- description:
- - Specifies whether the Virtual Machine Scale Set should be overprovisioned.
- type: bool
- default: True
- version_added: "2.8"
- single_placement_group:
- description:
-            - When C(true), this limits the scale set to a single placement group with a maximum size of 100 virtual machines.
- type: bool
- default: True
- version_added: "2.9"
- plan:
- description:
- - Third-party billing plan for the VM.
- version_added: "2.10"
- type: dict
- suboptions:
- name:
- description:
- - Billing plan name.
- required: true
- product:
- description:
- - Product name.
- required: true
- publisher:
- description:
- - Publisher offering the plan.
- required: true
- promotion_code:
- description:
- - Optional promotion code.
- zones:
- description:
- - A list of Availability Zones for your virtual machine scale set.
- type: list
- version_added: "2.8"
- custom_data:
- description:
-            - Data which is made available to the virtual machine and used by, for example, C(cloud-init).
- - Many images in the marketplace are not cloud-init ready. Thus, data sent to I(custom_data) would be ignored.
- - If the image you are attempting to use is not listed in
- U(https://docs.microsoft.com/en-us/azure/virtual-machines/linux/using-cloud-init#cloud-init-overview),
- follow these steps U(https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cloudinit-prepare-custom-image).
- version_added: "2.8"
- scale_in_policy:
- description:
-            - Define the order in which VMSS instances are scaled in.
- choices:
- - Default
- - NewestVM
- - OldestVM
- version_added: "2.10"
- terminate_event_timeout_minutes:
- description:
-            - Timeout (in minutes) for the termination notification event.
-            - Must be in the range between 5 and 15.
- version_added: "2.10"
- priority:
- description:
-            - Set this to C(Low) to request low-priority VMs for the VMSS. The default is C(Regular).
- default: Regular
- choices:
- - Regular
- - Low
- version_added: "2.10"
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Sertac Ozercan (@sozercan)
-
-'''
-EXAMPLES = '''
-
-- name: Create VMSS
- azure_rm_virtualmachinescaleset:
- resource_group: myResourceGroup
- name: testvmss
- vm_size: Standard_DS1_v2
- capacity: 2
- virtual_network_name: testvnet
- upgrade_policy: Manual
- subnet_name: testsubnet
- terminate_event_timeout_minutes: 10
- scale_in_policy: NewestVM
- admin_username: adminUser
- ssh_password_enabled: false
- ssh_public_keys:
- - path: /home/adminUser/.ssh/authorized_keys
-        key_data: < insert your ssh public key here... >
- managed_disk_type: Standard_LRS
- image:
- offer: CoreOS
- publisher: CoreOS
- sku: Stable
- version: latest
- data_disks:
- - lun: 0
- disk_size_gb: 64
- caching: ReadWrite
- managed_disk_type: Standard_LRS
-
-- name: Create VMSS with an image that requires plan information
- azure_rm_virtualmachinescaleset:
- resource_group: myResourceGroup
- name: testvmss
- vm_size: Standard_DS1_v2
- capacity: 3
- virtual_network_name: testvnet
- upgrade_policy: Manual
- subnet_name: testsubnet
- admin_username: adminUser
- ssh_password_enabled: false
- ssh_public_keys:
- - path: /home/adminUser/.ssh/authorized_keys
-        key_data: < insert your ssh public key here... >
- managed_disk_type: Standard_LRS
- image:
- offer: cis-ubuntu-linux-1804-l1
- publisher: center-for-internet-security-inc
- sku: Stable
- version: latest
- plan:
- name: cis-ubuntu-linux-1804-l1
- product: cis-ubuntu-linux-1804-l1
- publisher: center-for-internet-security-inc
- data_disks:
- - lun: 0
- disk_size_gb: 64
- caching: ReadWrite
- managed_disk_type: Standard_LRS
-
-- name: Create a VMSS with a custom image
- azure_rm_virtualmachinescaleset:
- resource_group: myResourceGroup
- name: testvmss
- vm_size: Standard_DS1_v2
- capacity: 2
- virtual_network_name: testvnet
- upgrade_policy: Manual
- subnet_name: testsubnet
- admin_username: adminUser
- admin_password: password01
- managed_disk_type: Standard_LRS
- image: customimage001
-
-- name: Create a VMSS with a custom image and override data disk
- azure_rm_virtualmachinescaleset:
- resource_group: myResourceGroup
- name: testvmss
- vm_size: Standard_DS1_v2
- capacity: 2
- virtual_network_name: testvnet
- upgrade_policy: Manual
- subnet_name: testsubnet
- admin_username: adminUser
- admin_password: password01
- managed_disk_type: Standard_LRS
- image: customimage001
- data_disks:
- - lun: 0
- disk_size_gb: 64
- caching: ReadWrite
- managed_disk_type: Standard_LRS
- create_option: FromImage
-
-- name: Create a VMSS with over 100 instances
- azure_rm_virtualmachinescaleset:
- resource_group: myResourceGroup
- name: testvmss
- vm_size: Standard_DS1_v2
- capacity: 120
- single_placement_group: False
- virtual_network_name: testvnet
- upgrade_policy: Manual
- subnet_name: testsubnet
- admin_username: adminUser
- admin_password: password01
- managed_disk_type: Standard_LRS
- image: customimage001
-
-- name: Create a VMSS with a custom image from a particular resource group
- azure_rm_virtualmachinescaleset:
- resource_group: myResourceGroup
- name: testvmss
- vm_size: Standard_DS1_v2
- capacity: 2
- virtual_network_name: testvnet
- upgrade_policy: Manual
- subnet_name: testsubnet
- admin_username: adminUser
- admin_password: password01
- managed_disk_type: Standard_LRS
- image:
- name: customimage001
- resource_group: myResourceGroup
-'''
-
-RETURN = '''
-azure_vmss:
- description:
- - Facts about the current state of the object.
- - Note that facts are not part of the registered output but available directly.
- returned: always
- type: dict
- sample: {
- "properties": {
- "overprovision": true,
- "scaleInPolicy": {
- "rules": [
- "NewestVM"
- ]
- },
- "singlePlacementGroup": true,
- "upgradePolicy": {
- "mode": "Manual"
- },
- "virtualMachineProfile": {
- "networkProfile": {
- "networkInterfaceConfigurations": [
- {
- "name": "testvmss",
- "properties": {
- "dnsSettings": {
- "dnsServers": []
- },
- "enableAcceleratedNetworking": false,
- "ipConfigurations": [
- {
- "name": "default",
- "properties": {
- "privateIPAddressVersion": "IPv4",
- "subnet": {
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/virtualNetworks/testvnet/subnets/testsubnet"
- }
- }
- }
- ],
- "primary": true
- }
- }
- ]
- },
- "osProfile": {
- "adminUsername": "testuser",
- "computerNamePrefix": "testvmss",
- "linuxConfiguration": {
- "disablePasswordAuthentication": true,
- "ssh": {
- "publicKeys": [
- {
- "keyData": "",
- "path": "/home/testuser/.ssh/authorized_keys"
- }
- ]
- }
- },
- "secrets": []
- },
- "scheduledEventsProfile": {
- "terminateNotificationProfile": {
- "enable": true,
- "notBeforeTimeout": "PT10M"
- }
- },
- "storageProfile": {
- "dataDisks": [
- {
- "caching": "ReadWrite",
- "createOption": "empty",
- "diskSizeGB": 64,
- "lun": 0,
- "managedDisk": {
- "storageAccountType": "Standard_LRS"
- }
- }
- ],
- "imageReference": {
- "offer": "CoreOS",
- "publisher": "CoreOS",
- "sku": "Stable",
- "version": "899.17.0"
- },
- "osDisk": {
- "caching": "ReadWrite",
- "createOption": "fromImage",
- "managedDisk": {
- "storageAccountType": "Standard_LRS"
- }
- }
- }
- }
- },
- "sku": {
- "capacity": 2,
- "name": "Standard_DS1_v2",
- "tier": "Standard"
- },
- "tags": null,
- "type": "Microsoft.Compute/virtualMachineScaleSets"
- }
-''' # NOQA
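
# Illustrative sketch (not part of the removed module): the
# terminate_event_timeout_minutes option (5-15) surfaces in the scale set's
# scheduledEventsProfile as an ISO 8601 duration, e.g. 10 -> 'PT10M' as shown
# in the sample above. A minimal conversion helper; name and validation are
# assumptions.
def to_not_before_timeout(minutes):
    if not 5 <= minutes <= 15:
        raise ValueError('terminate_event_timeout_minutes must be between 5 and 15')
    return 'PT{0}M'.format(minutes)

# Example: to_not_before_timeout(10) -> 'PT10M'
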
-
-import base64
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.tools import parse_resource_id
-
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, azure_id_to_dict, format_resource_id
-from ansible.module_utils.basic import to_native, to_bytes
-
-
-AZURE_OBJECT_CLASS = 'VirtualMachineScaleSet'
-
-AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
-
-
-class AzureRMVirtualMachineScaleSet(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- state=dict(choices=['present', 'absent'], default='present', type='str'),
- location=dict(type='str'),
- short_hostname=dict(type='str'),
- vm_size=dict(type='str'),
- tier=dict(type='str', choices=['Basic', 'Standard']),
- capacity=dict(type='int', default=1),
- upgrade_policy=dict(type='str', choices=['Automatic', 'Manual']),
- admin_username=dict(type='str'),
- admin_password=dict(type='str', no_log=True),
- ssh_password_enabled=dict(type='bool', default=True),
- ssh_public_keys=dict(type='list'),
- image=dict(type='raw'),
- os_disk_caching=dict(type='str', aliases=['disk_caching'], choices=['ReadOnly', 'ReadWrite'],
- default='ReadOnly'),
- os_type=dict(type='str', choices=['Linux', 'Windows'], default='Linux'),
- managed_disk_type=dict(type='str', choices=['Standard_LRS', 'Premium_LRS']),
- data_disks=dict(type='list'),
- subnet_name=dict(type='str', aliases=['subnet']),
- load_balancer=dict(type='str'),
- application_gateway=dict(type='str'),
- virtual_network_resource_group=dict(type='str'),
- virtual_network_name=dict(type='str', aliases=['virtual_network']),
- remove_on_absent=dict(type='list', default=['all']),
- enable_accelerated_networking=dict(type='bool'),
- security_group=dict(type='raw', aliases=['security_group_name']),
- overprovision=dict(type='bool', default=True),
- single_placement_group=dict(type='bool', default=True),
- zones=dict(type='list'),
- custom_data=dict(type='str'),
- plan=dict(type='dict', options=dict(publisher=dict(type='str', required=True),
- product=dict(type='str', required=True), name=dict(type='str', required=True),
- promotion_code=dict(type='str'))),
- scale_in_policy=dict(type='str', choices=['Default', 'OldestVM', 'NewestVM']),
- terminate_event_timeout_minutes=dict(type='int'),
- priority=dict(type='str', choices=['Regular', 'Low'], default='Regular')
- )
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.location = None
- self.short_hostname = None
- self.vm_size = None
- self.capacity = None
- self.tier = None
- self.upgrade_policy = None
- self.admin_username = None
- self.admin_password = None
- self.ssh_password_enabled = None
- self.ssh_public_keys = None
- self.image = None
- self.os_disk_caching = None
- self.managed_disk_type = None
- self.data_disks = None
- self.os_type = None
- self.subnet_name = None
- self.virtual_network_resource_group = None
- self.virtual_network_name = None
- self.tags = None
- self.differences = None
- self.load_balancer = None
- self.application_gateway = None
- self.enable_accelerated_networking = None
- self.security_group = None
- self.overprovision = None
- self.single_placement_group = None
- self.zones = None
- self.custom_data = None
- self.plan = None
- self.scale_in_policy = None
- self.terminate_event_timeout_minutes = None
- self.priority = None
-
- mutually_exclusive = [('load_balancer', 'application_gateway')]
- self.results = dict(
- changed=False,
- actions=[],
- ansible_facts=dict(azure_vmss=None)
- )
-
- super(AzureRMVirtualMachineScaleSet, self).__init__(
- derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- mutually_exclusive=mutually_exclusive)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- if self.module._name == 'azure_rm_virtualmachine_scaleset':
- self.module.deprecate("The 'azure_rm_virtualmachine_scaleset' module has been renamed to 'azure_rm_virtualmachinescaleset'", version='2.12')
-
- # make sure options are lower case
- self.remove_on_absent = set([resource.lower() for resource in self.remove_on_absent])
-
- # convert elements to ints
- self.zones = [int(i) for i in self.zones] if self.zones else None
-
- # default virtual_network_resource_group to resource_group
- if not self.virtual_network_resource_group:
- self.virtual_network_resource_group = self.resource_group
-
- changed = False
- results = dict()
- vmss = None
- disable_ssh_password = None
- subnet = None
- image_reference = None
- load_balancer_backend_address_pools = None
- load_balancer_inbound_nat_pools = None
- load_balancer = None
- application_gateway = None
- application_gateway_backend_address_pools = None
- support_lb_change = True
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- # Set default location
- self.location = resource_group.location
-
- if self.custom_data:
- self.custom_data = to_native(base64.b64encode(to_bytes(self.custom_data)))
-
- if self.state == 'present':
- # Verify parameters and resolve any defaults
-
- if self.vm_size and not self.vm_size_is_valid():
- self.fail("Parameter error: vm_size {0} is not valid for your subscription and location.".format(
- self.vm_size
- ))
-
- # if self.virtual_network_name:
- # virtual_network = self.get_virtual_network(self.virtual_network_name)
-
- if self.ssh_public_keys:
- msg = "Parameter error: expecting ssh_public_keys to be a list of type dict where " \
- "each dict contains keys: path, key_data."
- for key in self.ssh_public_keys:
- if not isinstance(key, dict):
- self.fail(msg)
- if not key.get('path') or not key.get('key_data'):
- self.fail(msg)
-
- if self.image and isinstance(self.image, dict):
- if all(key in self.image for key in ('publisher', 'offer', 'sku', 'version')):
- marketplace_image = self.get_marketplace_image_version()
- if self.image['version'] == 'latest':
- self.image['version'] = marketplace_image.name
- self.log("Using image version {0}".format(self.image['version']))
-
- image_reference = self.compute_models.ImageReference(
- publisher=self.image['publisher'],
- offer=self.image['offer'],
- sku=self.image['sku'],
- version=self.image['version']
- )
- elif self.image.get('name'):
- custom_image = True
- image_reference = self.get_custom_image_reference(
- self.image.get('name'),
- self.image.get('resource_group'))
- elif self.image.get('id'):
- try:
- image_reference = self.compute_models.ImageReference(id=self.image['id'])
- except Exception as exc:
- self.fail("id Error: Cannot get image from the reference id - {0}".format(self.image['id']))
- else:
- self.fail("parameter error: expecting image to contain [publisher, offer, sku, version], [name, resource_group] or [id]")
- elif self.image and isinstance(self.image, str):
- custom_image = True
- image_reference = self.get_custom_image_reference(self.image)
- elif self.image:
- self.fail("parameter error: expecting image to be a string or dict not {0}".format(type(self.image).__name__))
-
- disable_ssh_password = not self.ssh_password_enabled
-
- if self.load_balancer:
- load_balancer = self.get_load_balancer(self.load_balancer)
- load_balancer_backend_address_pools = ([self.compute_models.SubResource(id=resource.id)
- for resource in load_balancer.backend_address_pools]
- if load_balancer.backend_address_pools else None)
- load_balancer_inbound_nat_pools = ([self.compute_models.SubResource(id=resource.id)
- for resource in load_balancer.inbound_nat_pools]
- if load_balancer.inbound_nat_pools else None)
-
- if self.application_gateway:
- application_gateway = self.get_application_gateway(self.application_gateway)
- application_gateway_backend_address_pools = ([self.compute_models.SubResource(id=resource.id)
- for resource in application_gateway.backend_address_pools]
- if application_gateway.backend_address_pools else None)
-
- try:
- self.log("Fetching virtual machine scale set {0}".format(self.name))
- vmss = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name)
- self.check_provisioning_state(vmss, self.state)
- vmss_dict = self.serialize_vmss(vmss)
-
- if self.state == 'present':
- differences = []
- results = vmss_dict
-
- if self.os_disk_caching and \
- self.os_disk_caching != vmss_dict['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching']:
- self.log('CHANGED: virtual machine scale set {0} - OS disk caching'.format(self.name))
- differences.append('OS Disk caching')
- changed = True
- vmss_dict['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching'] = self.os_disk_caching
-
- if self.capacity and \
- self.capacity != vmss_dict['sku']['capacity']:
- self.log('CHANGED: virtual machine scale set {0} - Capacity'.format(self.name))
- differences.append('Capacity')
- changed = True
- vmss_dict['sku']['capacity'] = self.capacity
-
- if self.data_disks and \
- len(self.data_disks) != len(vmss_dict['properties']['virtualMachineProfile']['storageProfile'].get('dataDisks', [])):
- self.log('CHANGED: virtual machine scale set {0} - Data Disks'.format(self.name))
- differences.append('Data Disks')
- changed = True
-
- if self.upgrade_policy and \
- self.upgrade_policy != vmss_dict['properties']['upgradePolicy']['mode']:
- self.log('CHANGED: virtual machine scale set {0} - Upgrade Policy'.format(self.name))
- differences.append('Upgrade Policy')
- changed = True
- vmss_dict['properties']['upgradePolicy']['mode'] = self.upgrade_policy
-
- if image_reference and \
- image_reference.as_dict() != vmss_dict['properties']['virtualMachineProfile']['storageProfile']['imageReference']:
- self.log('CHANGED: virtual machine scale set {0} - Image'.format(self.name))
- differences.append('Image')
- changed = True
- vmss_dict['properties']['virtualMachineProfile']['storageProfile']['imageReference'] = image_reference.as_dict()
-
- update_tags, vmss_dict['tags'] = self.update_tags(vmss_dict.get('tags', dict()))
- if update_tags:
- differences.append('Tags')
- changed = True
-
- if bool(self.overprovision) != bool(vmss_dict['properties']['overprovision']):
- differences.append('overprovision')
- changed = True
-
- if bool(self.single_placement_group) != bool(vmss_dict['properties']['singlePlacementGroup']):
- differences.append('single_placement_group')
- changed = True
-
- vmss_dict['zones'] = [int(i) for i in vmss_dict['zones']] if 'zones' in vmss_dict and vmss_dict['zones'] else None
- if self.zones != vmss_dict['zones']:
- self.log("CHANGED: virtual machine scale sets {0} zones".format(self.name))
- differences.append('Zones')
- changed = True
- vmss_dict['zones'] = self.zones
-
- if self.terminate_event_timeout_minutes:
- timeout = self.terminate_event_timeout_minutes
- if timeout < 5 or timeout > 15:
- self.fail("terminate_event_timeout_minutes should >= 5 and <= 15")
- iso_8601_format = "PT" + str(timeout) + "M"
- old = vmss_dict['properties']['virtualMachineProfile'].get('scheduledEventsProfile', {}).\
- get('terminateNotificationProfile', {}).get('notBeforeTimeout', "")
- if old != iso_8601_format:
- differences.append('terminateNotification')
- changed = True
- vmss_dict['properties']['virtualMachineProfile'].setdefault('scheduledEventsProfile', {})['terminateNotificationProfile'] = {
- 'notBeforeTimeout': iso_8601_format,
- "enable": 'true'
- }
-
- if self.scale_in_policy and self.scale_in_policy != vmss_dict['properties'].get('scaleInPolicy', {}).get('rules', [""])[0]:
- self.log("CHANGED: virtual machine sale sets {0} scale in policy".format(self.name))
- differences.append('scaleInPolicy')
- changed = True
- vmss_dict['properties'].setdefault('scaleInPolicy', {})['rules'] = [self.scale_in_policy]
-
- nicConfigs = vmss_dict['properties']['virtualMachineProfile']['networkProfile']['networkInterfaceConfigurations']
-
- backend_address_pool = nicConfigs[0]['properties']['ipConfigurations'][0]['properties'].get('loadBalancerBackendAddressPools', [])
- backend_address_pool += nicConfigs[0]['properties']['ipConfigurations'][0]['properties'].get('applicationGatewayBackendAddressPools', [])
- lb_or_ag_id = None
- if (len(nicConfigs) != 1 or len(backend_address_pool) != 1):
- support_lb_change = False  # Updating is not currently supported when the VMSS has more than one NIC configuration or backend address pool
- self.module.warn('Updating more than one load balancer on VMSS is currently not supported')
- else:
- if load_balancer:
- lb_or_ag_id = "{0}/".format(load_balancer.id)
- elif application_gateway:
- lb_or_ag_id = "{0}/".format(application_gateway.id)
-
- backend_address_pool_id = backend_address_pool[0].get('id')
- if lb_or_ag_id is not None and (bool(lb_or_ag_id) != bool(backend_address_pool_id) or not backend_address_pool_id.startswith(lb_or_ag_id)):
- differences.append('load_balancer')
- changed = True
-
- if self.custom_data:
- if self.custom_data != vmss_dict['properties']['virtualMachineProfile']['osProfile'].get('customData'):
- differences.append('custom_data')
- changed = True
- vmss_dict['properties']['virtualMachineProfile']['osProfile']['customData'] = self.custom_data
-
- self.differences = differences
-
- elif self.state == 'absent':
- self.log("CHANGED: virtual machine scale set {0} exists and requested state is 'absent'".format(self.name))
- results = dict()
- changed = True
-
- except CloudError:
- self.log('Virtual machine scale set {0} does not exist'.format(self.name))
- if self.state == 'present':
- self.log("CHANGED: virtual machine scale set {0} does not exist but state is 'present'.".format(self.name))
- changed = True
-
- self.results['changed'] = changed
- self.results['ansible_facts']['azure_vmss'] = results
-
- if self.check_mode:
- return self.results
-
- if changed:
- if self.state == 'present':
- if not vmss:
- # Create the VMSS
- if self.vm_size is None:
- self.fail("vm size must be set")
-
- self.log("Create virtual machine scale set {0}".format(self.name))
- self.results['actions'].append('Created VMSS {0}'.format(self.name))
-
- if self.os_type == 'Linux':
- if disable_ssh_password and not self.ssh_public_keys:
- self.fail("Parameter error: ssh_public_keys required when disabling SSH password.")
-
- if not self.virtual_network_name:
- self.fail("virtual network name is required")
-
- if self.subnet_name:
- subnet = self.get_subnet(self.virtual_network_name, self.subnet_name)
-
- if not self.short_hostname:
- self.short_hostname = self.name
-
- if not image_reference:
- self.fail("Parameter error: an image is required when creating a virtual machine.")
-
- managed_disk = self.compute_models.VirtualMachineScaleSetManagedDiskParameters(storage_account_type=self.managed_disk_type)
-
- if self.security_group:
- nsg = self.parse_nsg()
- if nsg:
- self.security_group = self.network_models.NetworkSecurityGroup(id=nsg.get('id'))
-
- plan = None
- if self.plan:
- plan = self.compute_models.Plan(name=self.plan.get('name'), product=self.plan.get('product'),
- publisher=self.plan.get('publisher'),
- promotion_code=self.plan.get('promotion_code'))
-
- os_profile = None
- if self.admin_username or self.custom_data or self.ssh_public_keys:
- os_profile = self.compute_models.VirtualMachineScaleSetOSProfile(
- admin_username=self.admin_username,
- computer_name_prefix=self.short_hostname,
- custom_data=self.custom_data
- )
-
- vmss_resource = self.compute_models.VirtualMachineScaleSet(
- location=self.location,
- overprovision=self.overprovision,
- single_placement_group=self.single_placement_group,
- tags=self.tags,
- upgrade_policy=self.compute_models.UpgradePolicy(
- mode=self.upgrade_policy
- ),
- sku=self.compute_models.Sku(
- name=self.vm_size,
- capacity=self.capacity,
- tier=self.tier,
- ),
- plan=plan,
- virtual_machine_profile=self.compute_models.VirtualMachineScaleSetVMProfile(
- priority=self.priority,
- os_profile=os_profile,
- storage_profile=self.compute_models.VirtualMachineScaleSetStorageProfile(
- os_disk=self.compute_models.VirtualMachineScaleSetOSDisk(
- managed_disk=managed_disk,
- create_option=self.compute_models.DiskCreateOptionTypes.from_image,
- caching=self.os_disk_caching,
- ),
- image_reference=image_reference,
- ),
- network_profile=self.compute_models.VirtualMachineScaleSetNetworkProfile(
- network_interface_configurations=[
- self.compute_models.VirtualMachineScaleSetNetworkConfiguration(
- name=self.name,
- primary=True,
- ip_configurations=[
- self.compute_models.VirtualMachineScaleSetIPConfiguration(
- name='default',
- subnet=self.compute_models.ApiEntityReference(
- id=subnet.id
- ),
- primary=True,
- load_balancer_backend_address_pools=load_balancer_backend_address_pools,
- load_balancer_inbound_nat_pools=load_balancer_inbound_nat_pools,
- application_gateway_backend_address_pools=application_gateway_backend_address_pools
- )
- ],
- enable_accelerated_networking=self.enable_accelerated_networking,
- network_security_group=self.security_group
- )
- ]
- )
- ),
- zones=self.zones
- )
-
- if self.scale_in_policy:
- vmss_resource.scale_in_policy = self.gen_scale_in_policy()
-
- if self.terminate_event_timeout_minutes:
- vmss_resource.virtual_machine_profile.scheduled_events_profile = self.gen_scheduled_event_profile()
-
- if self.admin_password:
- vmss_resource.virtual_machine_profile.os_profile.admin_password = self.admin_password
-
- if self.os_type == 'Linux' and os_profile:
- vmss_resource.virtual_machine_profile.os_profile.linux_configuration = self.compute_models.LinuxConfiguration(
- disable_password_authentication=disable_ssh_password
- )
-
- if self.ssh_public_keys:
- ssh_config = self.compute_models.SshConfiguration()
- ssh_config.public_keys = \
- [self.compute_models.SshPublicKey(path=key['path'], key_data=key['key_data']) for key in self.ssh_public_keys]
- vmss_resource.virtual_machine_profile.os_profile.linux_configuration.ssh = ssh_config
-
- if self.data_disks:
- data_disks = []
-
- for data_disk in self.data_disks:
- data_disk_managed_disk = self.compute_models.VirtualMachineScaleSetManagedDiskParameters(
- storage_account_type=data_disk.get('managed_disk_type', None)
- )
-
- data_disk['caching'] = data_disk.get(
- 'caching',
- self.compute_models.CachingTypes.read_only
- )
-
- data_disks.append(self.compute_models.VirtualMachineScaleSetDataDisk(
- lun=data_disk.get('lun', None),
- caching=data_disk.get('caching', None),
- create_option=data_disk.get('create_option', self.compute_models.DiskCreateOptionTypes.empty),
- disk_size_gb=data_disk.get('disk_size_gb', None),
- managed_disk=data_disk_managed_disk,
- ))
-
- vmss_resource.virtual_machine_profile.storage_profile.data_disks = data_disks
-
- if self.plan:
- try:
- plan_name = self.plan.get('name')
- plan_product = self.plan.get('product')
- plan_publisher = self.plan.get('publisher')
- term = self.marketplace_client.marketplace_agreements.get(
- publisher_id=plan_publisher, offer_id=plan_product, plan_id=plan_name)
- term.accepted = True
- self.marketplace_client.marketplace_agreements.create(
- publisher_id=plan_publisher, offer_id=plan_product, plan_id=plan_name, parameters=term)
- except Exception as exc:
- self.fail(("Error accepting terms for virtual machine {0} with plan {1}. " +
- "Only service admin/account admin users can purchase images " +
- "from the marketplace. - {2}").format(self.name, self.plan, str(exc)))
-
- self.log("Create virtual machine with parameters:")
- self.create_or_update_vmss(vmss_resource)
-
- elif self.differences and len(self.differences) > 0:
- self.log("Update virtual machine scale set {0}".format(self.name))
- self.results['actions'].append('Updated VMSS {0}'.format(self.name))
-
- vmss_resource = self.get_vmss()
- vmss_resource.virtual_machine_profile.storage_profile.os_disk.caching = self.os_disk_caching
- vmss_resource.sku.capacity = self.capacity
- vmss_resource.overprovision = self.overprovision
- vmss_resource.single_placement_group = self.single_placement_group
-
- if support_lb_change:
- if self.load_balancer:
- vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \
- .ip_configurations[0].load_balancer_backend_address_pools = load_balancer_backend_address_pools
- vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \
- .ip_configurations[0].load_balancer_inbound_nat_pools = load_balancer_inbound_nat_pools
- vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \
- .ip_configurations[0].application_gateway_backend_address_pools = None
- elif self.application_gateway:
- vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \
- .ip_configurations[0].application_gateway_backend_address_pools = application_gateway_backend_address_pools
- vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \
- .ip_configurations[0].load_balancer_backend_address_pools = None
- vmss_resource.virtual_machine_profile.network_profile.network_interface_configurations[0] \
- .ip_configurations[0].load_balancer_inbound_nat_pools = None
-
- if self.data_disks is not None:
- data_disks = []
- for data_disk in self.data_disks:
- data_disks.append(self.compute_models.VirtualMachineScaleSetDataDisk(
- lun=data_disk['lun'],
- caching=data_disk['caching'],
- create_option=data_disk.get('create_option', self.compute_models.DiskCreateOptionTypes.empty),
- disk_size_gb=data_disk['disk_size_gb'],
- managed_disk=self.compute_models.VirtualMachineScaleSetManagedDiskParameters(
- storage_account_type=data_disk.get('managed_disk_type', None)
- ),
- ))
- vmss_resource.virtual_machine_profile.storage_profile.data_disks = data_disks
-
- if self.scale_in_policy:
- vmss_resource.scale_in_policy = self.gen_scale_in_policy()
-
- if self.terminate_event_timeout_minutes:
- vmss_resource.virtual_machine_profile.scheduled_events_profile = self.gen_scheduled_event_profile()
-
- if image_reference is not None:
- vmss_resource.virtual_machine_profile.storage_profile.image_reference = image_reference
- self.log("Update virtual machine with parameters:")
- self.create_or_update_vmss(vmss_resource)
-
- self.results['ansible_facts']['azure_vmss'] = self.serialize_vmss(self.get_vmss())
-
- elif self.state == 'absent':
- # delete the VM
- self.log("Delete virtual machine scale set {0}".format(self.name))
- self.results['ansible_facts']['azure_vmss'] = None
- self.delete_vmss(vmss)
-
- # until we sort out how we want to do this globally
- del self.results['actions']
-
- return self.results
-
- def get_vmss(self):
- '''
- Get the VMSS
-
- :return: VirtualMachineScaleSet object
- '''
- try:
- vmss = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name)
- return vmss
- except CloudError as exc:
- self.fail("Error getting virtual machine scale set {0} - {1}".format(self.name, str(exc)))
-
- def get_virtual_network(self, name):
- try:
- vnet = self.network_client.virtual_networks.get(self.virtual_network_resource_group, name)
- return vnet
- except CloudError as exc:
- self.fail("Error fetching virtual network {0} - {1}".format(name, str(exc)))
-
- def get_subnet(self, vnet_name, subnet_name):
- self.log("Fetching subnet {0} in virtual network {1}".format(subnet_name, vnet_name))
- try:
- subnet = self.network_client.subnets.get(self.virtual_network_resource_group, vnet_name, subnet_name)
- except CloudError as exc:
- self.fail("Error: fetching subnet {0} in virtual network {1} - {2}".format(
- subnet_name,
- vnet_name,
- str(exc)))
- return subnet
-
- def get_load_balancer(self, id):
- id_dict = parse_resource_id(id)
- try:
- return self.network_client.load_balancers.get(id_dict.get('resource_group', self.resource_group), id_dict.get('name'))
- except CloudError as exc:
- self.fail("Error fetching load balancer {0} - {1}".format(id, str(exc)))
-
- def get_application_gateway(self, id):
- id_dict = parse_resource_id(id)
- try:
- return self.network_client.application_gateways.get(id_dict.get('resource_group', self.resource_group), id_dict.get('name'))
- except CloudError as exc:
- self.fail("Error fetching application_gateway {0} - {1}".format(id, str(exc)))
-
- def serialize_vmss(self, vmss):
- '''
- Convert a VirtualMachineScaleSet object to dict.
-
- :param vmss: VirtualMachineScaleSet object
- :return: dict
- '''
-
- result = self.serialize_obj(vmss, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)
- result['id'] = vmss.id
- result['name'] = vmss.name
- result['type'] = vmss.type
- result['location'] = vmss.location
- result['tags'] = vmss.tags
-
- return result
-
- def delete_vmss(self, vmss):
- self.log("Deleting virtual machine scale set {0}".format(self.name))
- self.results['actions'].append("Deleted virtual machine scale set {0}".format(self.name))
- try:
- poller = self.compute_client.virtual_machine_scale_sets.delete(self.resource_group, self.name)
- # wait for the poller to finish
- self.get_poller_result(poller)
- except CloudError as exc:
- self.fail("Error deleting virtual machine scale set {0} - {1}".format(self.name, str(exc)))
-
- return True
-
- def get_marketplace_image_version(self):
- try:
- versions = self.compute_client.virtual_machine_images.list(self.location,
- self.image['publisher'],
- self.image['offer'],
- self.image['sku'])
- except CloudError as exc:
- self.fail("Error fetching image {0} {1} {2} - {3}".format(self.image['publisher'],
- self.image['offer'],
- self.image['sku'],
- str(exc)))
- if versions and len(versions) > 0:
- if self.image['version'] == 'latest':
- return versions[len(versions) - 1]
- for version in versions:
- if version.name == self.image['version']:
- return version
-
- self.fail("Error could not find image {0} {1} {2} {3}".format(self.image['publisher'],
- self.image['offer'],
- self.image['sku'],
- self.image['version']))
-
- def get_custom_image_reference(self, name, resource_group=None):
- try:
- if resource_group:
- vm_images = self.compute_client.images.list_by_resource_group(resource_group)
- else:
- vm_images = self.compute_client.images.list()
- except Exception as exc:
- self.fail("Error fetching custom images from subscription - {0}".format(str(exc)))
-
- for vm_image in vm_images:
- if vm_image.name == name:
- self.log("Using custom image id {0}".format(vm_image.id))
- return self.compute_models.ImageReference(id=vm_image.id)
-
- self.fail("Error could not find image with name {0}".format(name))
-
- def create_or_update_vmss(self, params):
- try:
- poller = self.compute_client.virtual_machine_scale_sets.create_or_update(self.resource_group, self.name, params)
- self.get_poller_result(poller)
- except CloudError as exc:
- self.fail("Error creating or updating virtual machine {0} - {1}".format(self.name, str(exc)))
-
- def vm_size_is_valid(self):
- '''
- Validate self.vm_size against the list of virtual machine sizes available for the account and location.
-
- :return: boolean
- '''
- try:
- sizes = self.compute_client.virtual_machine_sizes.list(self.location)
- except CloudError as exc:
- self.fail("Error retrieving available machine sizes - {0}".format(str(exc)))
- for size in sizes:
- if size.name == self.vm_size:
- return True
- return False
-
- def parse_nsg(self):
- nsg = self.security_group
- resource_group = self.resource_group
- if isinstance(self.security_group, dict):
- nsg = self.security_group.get('name')
- resource_group = self.security_group.get('resource_group', self.resource_group)
- id = format_resource_id(val=nsg,
- subscription_id=self.subscription_id,
- namespace='Microsoft.Network',
- types='networkSecurityGroups',
- resource_group=resource_group)
- name = azure_id_to_dict(id).get('name')
- return dict(id=id, name=name)
-
- def gen_scheduled_event_profile(self):
- if self.terminate_event_timeout_minutes is None:
- return None
-
- scheduledEventProfile = self.compute_models.ScheduledEventsProfile()
- terminationProfile = self.compute_models.TerminateNotificationProfile()
- terminationProfile.not_before_timeout = "PT" + str(self.terminate_event_timeout_minutes) + "M"
- terminationProfile.enable = True
- scheduledEventProfile.terminate_notification_profile = terminationProfile
- return scheduledEventProfile
-
- def gen_scale_in_policy(self):
- if self.scale_in_policy is None:
- return None
-
- return self.compute_models.ScaleInPolicy(rules=[self.scale_in_policy])
-
-
-def main():
- AzureRMVirtualMachineScaleSet()
-
-
-if __name__ == '__main__':
- main()
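For reference, the module above validates terminate_event_timeout_minutes and encodes it as an ISO 8601 duration ("PT<minutes>M") before building the scheduled-events profile (see gen_scheduled_event_profile and the 5-15 minute check in exec_module). The following is a minimal standalone sketch of that conversion; it uses plain dicts laid out like the scheduledEventsProfile block in the RETURN sample rather than the azure.mgmt.compute model classes the module actually instantiates, and is illustrative only.

    def build_terminate_notification_profile(timeout_minutes):
        """Return a scheduledEventsProfile-style dict for the given timeout, or None."""
        if timeout_minutes is None:
            return None
        if timeout_minutes < 5 or timeout_minutes > 15:
            # the module fails outside the 5-15 minute window
            raise ValueError("terminate_event_timeout_minutes should be between 5 and 15")
        return {
            "terminateNotificationProfile": {
                "enable": True,
                "notBeforeTimeout": "PT{0}M".format(timeout_minutes),  # ISO 8601 duration
            }
        }

    # build_terminate_notification_profile(10)
    # -> {'terminateNotificationProfile': {'enable': True, 'notBeforeTimeout': 'PT10M'}}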
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset_info.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset_info.py
deleted file mode 100644
index 8ccad04285..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset_info.py
+++ /dev/null
@@ -1,437 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2017, Sertac Ozercan <seozerca@microsoft.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_virtualmachinescaleset_info
-
-version_added: "2.9"
-
-short_description: Get Virtual Machine Scale Set facts
-
-description:
- - Get facts for a virtual machine scale set.
- - Note that this module was called M(azure_rm_virtualmachine_scaleset_facts) before Ansible 2.8. The usage did not change.
-
-options:
- name:
- description:
- - Limit results to a specific virtual machine scale set.
- resource_group:
- description:
- - The resource group to search for the desired virtual machine scale set.
- tags:
- description:
- - List of tags to be matched.
- format:
- description:
- - Format of the data returned.
- - If C(raw) is selected, information will be returned in raw format from the Azure Python SDK.
- - If C(curated) is selected, the structure will be identical to the input parameters of the M(azure_rm_virtualmachinescaleset) module.
- - In Ansible 2.5 and lower, facts are always returned in raw format.
- - Please note that this option will be deprecated in 2.10, when the curated format will become the only supported format.
- default: 'raw'
- choices:
- - 'curated'
- - 'raw'
- version_added: "2.6"
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Sertac Ozercan (@sozercan)
-'''
-
-EXAMPLES = '''
- - name: Get facts for a virtual machine scale set
- azure_rm_virtualmachinescaleset_info:
- resource_group: myResourceGroup
- name: testvmss001
- format: curated
-
- - name: Get facts for all virtual machine scale sets
- azure_rm_virtualmachinescaleset_info:
- resource_group: myResourceGroup
-
- - name: Get facts by tags
- azure_rm_virtualmachinescaleset_info:
- resource_group: myResourceGroup
- tags:
- - testing
-'''
-
-RETURN = '''
-vmss:
- description:
- - List of virtual machine scale sets.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/scalesets/myscaleset
- admin_username:
- description:
- - Admin username used to access the host after it is created.
- returned: always
- type: str
- sample: adminuser
- capacity:
- description:
- - Capacity of VMSS.
- returned: always
- type: int
- sample: 2
- data_disks:
- description:
- - List of attached data disks.
- returned: always
- type: complex
- contains:
- caching:
- description:
- - Type of data disk caching.
- returned: always
- type: str
- sample: ReadOnly
- disk_size_gb:
- description:
- - The initial disk size in GB for blank data disks.
- returned: always
- type: int
- sample: 64
- lun:
- description:
- - The logical unit number for data disk.
- returned: always
- type: int
- sample: 0
- managed_disk_type:
- description:
- - Managed data disk type.
- returned: always
- type: str
- sample: Standard_LRS
- image:
- description:
- - Image specification.
- returned: always
- type: complex
- contains:
- offer:
- description:
- - The offer of the platform image or marketplace image used to create the virtual machine.
- returned: always
- type: str
- sample: RHEL
- publisher:
- description:
- - Publisher name.
- returned: always
- type: str
- sample: RedHat
- sku:
- description:
- - SKU name.
- returned: always
- type: str
- sample: 7-RAW
- version:
- description:
- - Image version.
- returned: always
- type: str
- sample: 7.5.2018050901
- load_balancer:
- description:
- - Load balancer name.
- returned: always
- type: str
- sample: testlb
- location:
- description:
- - Resource location.
- type: str
- returned: always
- sample: japaneast
- managed_disk_type:
- description:
- - Managed data disk type.
- type: str
- returned: always
- sample: Standard_LRS
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: myvmss
- os_disk_caching:
- description:
- - Type of OS disk caching.
- type: str
- returned: always
- sample: ReadOnly
- os_type:
- description:
- - Base type of operating system.
- type: str
- returned: always
- sample: Linux
- overprovision:
- description:
- - Specifies whether the Virtual Machine Scale Set should be overprovisioned.
- type: bool
- sample: true
- resource_group:
- description:
- - Resource group.
- type: str
- returned: always
- sample: myResourceGroup
- ssh_password_enabled:
- description:
- - Is SSH password authentication enabled. Valid only for Linux.
- type: bool
- returned: always
- sample: true
- subnet_name:
- description:
- - Subnet name.
- type: str
- returned: always
- sample: testsubnet
- tier:
- description:
- - SKU Tier.
- type: str
- returned: always
- sample: Basic
- upgrade_policy:
- description:
- - Upgrade policy.
- type: str
- returned: always
- sample: Manual
- virtual_network_name:
- description:
- - Associated virtual network name.
- type: str
- returned: always
- sample: testvn
- vm_size:
- description:
- - Virtual machine size.
- type: str
- returned: always
- sample: Standard_D4
- tags:
- description:
- - Tags assigned to the resource. Dictionary of string:string pairs.
- returned: always
- type: dict
- sample: { "tag1": "abc" }
-''' # NOQA
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-import re
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except Exception:
- # handled in azure_rm_common
- pass
-
-AZURE_OBJECT_CLASS = 'VirtualMachineScaleSet'
-
-AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
-
-
-class AzureRMVirtualMachineScaleSetInfo(AzureRMModuleBase):
- """Utility class to get virtual machine scale set facts"""
-
- def __init__(self):
-
- self.module_args = dict(
- name=dict(type='str'),
- resource_group=dict(type='str'),
- tags=dict(type='list'),
- format=dict(
- type='str',
- choices=['curated',
- 'raw'],
- default='raw'
- )
- )
-
- self.results = dict(
- changed=False,
- )
-
- self.name = None
- self.resource_group = None
- self.format = None
- self.tags = None
-
- super(AzureRMVirtualMachineScaleSetInfo, self).__init__(
- derived_arg_spec=self.module_args,
- supports_tags=False,
- facts_module=True
- )
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_virtualmachinescaleset_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_virtualmachinescaleset_facts' module has been renamed to 'azure_rm_virtualmachinescaleset_info'",
- version='2.13')
-
- for key in self.module_args:
- setattr(self, key, kwargs[key])
-
- if self.name and not self.resource_group:
- self.fail("Parameter error: resource group required when filtering by name.")
-
- if self.name:
- result = self.get_item()
- else:
- result = self.list_items()
-
- if self.format == 'curated':
- for index in range(len(result)):
- vmss = result[index]
- subnet_name = None
- load_balancer_name = None
- virtual_network_name = None
- ssh_password_enabled = False
-
- try:
- subnet_id = (vmss['properties']['virtualMachineProfile']['networkProfile']['networkInterfaceConfigurations'][0]
- ['properties']['ipConfigurations'][0]['properties']['subnet']['id'])
- subnet_name = re.sub('.*subnets\\/', '', subnet_id)
- except Exception:
- self.log('Could not extract subnet name')
-
- try:
- backend_address_pool_id = (vmss['properties']['virtualMachineProfile']['networkProfile']['networkInterfaceConfigurations'][0]
- ['properties']['ipConfigurations'][0]['properties']['loadBalancerBackendAddressPools'][0]['id'])
- load_balancer_name = re.sub('\\/backendAddressPools.*', '', re.sub('.*loadBalancers\\/', '', backend_address_pool_id))
- virtual_network_name = re.sub('.*virtualNetworks\\/', '', re.sub('\\/subnets.*', '', subnet_id))
- except Exception:
- self.log('Could not extract load balancer / virtual network name')
-
- try:
- ssh_password_enabled = (not vmss['properties']['virtualMachineProfile']['osProfile']
- ['linuxConfiguration']['disablePasswordAuthentication'])
- except Exception:
- self.log('Could not extract SSH password enabled')
-
- data_disks = vmss['properties']['virtualMachineProfile']['storageProfile'].get('dataDisks', [])
-
- for disk_index in range(len(data_disks)):
- old_disk = data_disks[disk_index]
- new_disk = {
- 'lun': old_disk['lun'],
- 'disk_size_gb': old_disk['diskSizeGB'],
- 'managed_disk_type': old_disk['managedDisk']['storageAccountType'],
- 'caching': old_disk['caching']
- }
- data_disks[disk_index] = new_disk
-
- updated = {
- 'id': vmss['id'],
- 'resource_group': self.resource_group,
- 'name': vmss['name'],
- 'state': 'present',
- 'location': vmss['location'],
- 'vm_size': vmss['sku']['name'],
- 'capacity': vmss['sku']['capacity'],
- 'tier': vmss['sku']['tier'],
- 'upgrade_policy': vmss['properties']['upgradePolicy']['mode'],
- 'admin_username': vmss['properties']['virtualMachineProfile']['osProfile']['adminUsername'],
- 'admin_password': vmss['properties']['virtualMachineProfile']['osProfile'].get('adminPassword'),
- 'ssh_password_enabled': ssh_password_enabled,
- 'image': vmss['properties']['virtualMachineProfile']['storageProfile']['imageReference'],
- 'os_disk_caching': vmss['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching'],
- 'os_type': 'Linux' if (vmss['properties']['virtualMachineProfile']['osProfile'].get('linuxConfiguration') is not None) else 'Windows',
- 'overprovision': vmss['properties']['overprovision'],
- 'managed_disk_type': vmss['properties']['virtualMachineProfile']['storageProfile']['osDisk']['managedDisk']['storageAccountType'],
- 'data_disks': data_disks,
- 'virtual_network_name': virtual_network_name,
- 'subnet_name': subnet_name,
- 'load_balancer': load_balancer_name,
- 'tags': vmss.get('tags')
- }
-
- result[index] = updated
-
- if is_old_facts:
- self.results['ansible_facts'] = {
- 'azure_vmss': result
- }
- if self.format == 'curated':
- # proper result format we want to support in the future
- # dropping 'ansible_facts' and shorter name 'vmss'
- self.results['vmss'] = result
- else:
- self.results['vmss'] = result
-
- return self.results
-
- def get_item(self):
- """Get a single virtual machine scale set"""
-
- self.log('Get properties for {0}'.format(self.name))
-
- item = None
- results = []
-
- try:
- item = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name)
- except CloudError:
- pass
-
- if item and self.has_tags(item.tags, self.tags):
- results = [self.serialize_obj(item, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)]
-
- return results
-
- def list_items(self):
- """Get all virtual machine scale sets"""
-
- self.log('List all virtual machine scale sets')
-
- try:
- response = self.compute_client.virtual_machine_scale_sets.list(self.resource_group)
- except CloudError as exc:
- self.fail('Failed to list all items - {0}'.format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES))
-
- return results
-
-
-def main():
- """Main module execution code path"""
-
- AzureRMVirtualMachineScaleSetInfo()
-
-
-if __name__ == '__main__':
- main()
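The curated formatter above recovers the subnet, virtual network and load balancer names by stripping the surrounding Azure resource-ID segments with re.sub. Below is a minimal standalone sketch of that parsing; the two resource IDs are placeholders invented for illustration, not values taken from a real subscription.

    import re

    subnet_id = ("/subscriptions/xxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network"
                 "/virtualNetworks/testvnet/subnets/testsubnet")
    pool_id = ("/subscriptions/xxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network"
               "/loadBalancers/testlb/backendAddressPools/default")

    subnet_name = re.sub('.*subnets\\/', '', subnet_id)                    # 'testsubnet'
    virtual_network_name = re.sub('.*virtualNetworks\\/', '',
                                  re.sub('\\/subnets.*', '', subnet_id))   # 'testvnet'
    load_balancer_name = re.sub('\\/backendAddressPools.*', '',
                                re.sub('.*loadBalancers\\/', '', pool_id))  # 'testlb'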
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetextension.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetextension.py
deleted file mode 100644
index 8a28c26c75..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetextension.py
+++ /dev/null
@@ -1,301 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_virtualmachinescalesetextension
-
-version_added: "2.8"
-
-short_description: Manage Azure Virtual Machine Scale Set (VMSS) extensions
-
-description:
- - Create, update and delete Azure Virtual Machine Scale Set (VMSS) extensions.
-
-options:
- resource_group:
- description:
- - Name of a resource group where the VMSS extension exists or will be created.
- required: true
- vmss_name:
- description:
- - The name of the virtual machine scale set where the extension should be created or updated.
- required: true
- name:
- description:
- - Name of the VMSS extension.
- location:
- description:
- - Valid Azure location. Defaults to location of the resource group.
- publisher:
- description:
- - The name of the extension handler publisher.
- type:
- description:
- - The type of the extension handler.
- type_handler_version:
- description:
- - The type version of the extension handler.
- settings:
- description:
- - A dictionary containing extension settings.
- - Settings depend on extension type.
- - Refer to U(https://docs.microsoft.com/en-us/azure/virtual-machines/extensions/overview) for more information.
- protected_settings:
- description:
- - A dictionary containing protected extension settings.
- - Settings depend on extension type.
- - Refer to U(https://docs.microsoft.com/en-us/azure/virtual-machines/extensions/overview) for more information.
- auto_upgrade_minor_version:
- description:
- - Whether the extension handler should be automatically upgraded across minor versions.
- type: bool
- state:
- description:
- - Assert the state of the extension.
- - Use C(present) to create or update an extension and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-'''
-
-EXAMPLES = '''
- - name: Install VMSS Extension
- azure_rm_virtualmachinescalesetextension:
- name: myvmssextension
- location: eastus
- resource_group: myResourceGroup
- vmss_name: myvm
- publisher: Microsoft.Azure.Extensions
- type: CustomScript
- type_handler_version: 2.0
- settings: '{"commandToExecute": "hostname"}'
- auto_upgrade_minor_version: true
-
- - name: Remove VMSS Extension
- azure_rm_virtualmachinescalesetextension:
- name: myvmssextension
- location: eastus
- resource_group: myResourceGroup
- vmss_name: myvm
- state: absent
-'''
-
-RETURN = '''
-id:
- description:
- - VMSS extension resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/scalesets/myscaleset/extensions/myext
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMVMSSExtension(AzureRMModuleBase):
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- vmss_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- publisher=dict(
- type='str'
- ),
- type=dict(
- type='str'
- ),
- type_handler_version=dict(
- type='str'
- ),
- auto_upgrade_minor_version=dict(
- type='bool'
- ),
- settings=dict(
- type='dict'
- ),
- protected_settings=dict(
- type='dict'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- ),
- )
-
- self.resource_group = None
- self.name = None
- self.location = None
- self.publisher = None
- self.type = None
- self.type_handler_version = None
- self.auto_upgrade_minor_version = None
- self.settings = None
- self.protected_settings = None
- self.state = None
-
- required_if = [
- ('state', 'present', [
- 'publisher', 'type', 'type_handler_version'])
- ]
-
- self.results = dict(changed=False, state=dict())
-
- super(AzureRMVMSSExtension, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_tags=False,
- required_if=required_if)
-
- def exec_module(self, **kwargs):
- for key in list(self.module_arg_spec.keys()):
- setattr(self, key, kwargs[key])
-
- resource_group = None
- response = None
- to_be_updated = False
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- if self.state == 'present':
- response = self.get_vmssextension()
- if not response:
- to_be_updated = True
- else:
- if self.settings is not None:
- if response.get('settings') != self.settings:
- response['settings'] = self.settings
- to_be_updated = True
- else:
- self.settings = response.get('settings')
-
- if self.protected_settings is not None:
- if response.get('protected_settings') != self.protected_settings:
- response['protected_settings'] = self.protected_settings
- to_be_updated = True
- else:
- self.protected_settings = response.get('protected_settings')
-
- if response['publisher'] != self.publisher:
- self.publisher = response['publisher']
- self.module.warn("Property 'publisher' cannot be changed")
-
- if response['type'] != self.type:
- self.type = response['type']
- self.module.warn("Property 'type' cannot be changed")
-
- if response['type_handler_version'] != self.type_handler_version:
- response['type_handler_version'] = self.type_handler_version
- to_be_updated = True
-
- if self.auto_upgrade_minor_version is not None:
- if response['auto_upgrade_minor_version'] != self.auto_upgrade_minor_version:
- response['auto_upgrade_minor_version'] = self.auto_upgrade_minor_version
- to_be_updated = True
- else:
- self.auto_upgrade_minor_version = response['auto_upgrade_minor_version']
-
- if to_be_updated:
- if not self.check_mode:
- response = self.create_or_update_vmssextension()
- self.results['changed'] = True
- elif self.state == 'absent':
- if not self.check_mode:
- self.delete_vmssextension()
- self.results['changed'] = True
-
- if response:
- self.results['id'] = response.get('id')
-
- return self.results
-
- def create_or_update_vmssextension(self):
- self.log("Creating VMSS extension {0}".format(self.name))
- try:
- params = self.compute_models.VirtualMachineScaleSetExtension(
- location=self.location,
- publisher=self.publisher,
- type=self.type,
- type_handler_version=self.type_handler_version,
- auto_upgrade_minor_version=self.auto_upgrade_minor_version,
- settings=self.settings,
- protected_settings=self.protected_settings
- )
- poller = self.compute_client.virtual_machine_scale_set_extensions.create_or_update(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- vmss_extension_name=self.name,
- extension_parameters=params)
- response = self.get_poller_result(poller)
- return response.as_dict()
-
- except CloudError as e:
- self.log('Error attempting to create the VMSS extension.')
- self.fail("Error creating the VMSS extension: {0}".format(str(e)))
-
- def delete_vmssextension(self):
- self.log("Deleting vmextension {0}".format(self.name))
- try:
- poller = self.compute_client.virtual_machine_scale_set_extensions.delete(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- vmss_extension_name=self.name)
- self.get_poller_result(poller)
- except CloudError as e:
- self.log('Error attempting to delete the VMSS extension.')
- self.fail("Error deleting the VMSS extension: {0}".format(str(e)))
-
- def get_vmssextension(self):
- self.log("Checking if the VMSS extension {0} is present".format(self.name))
- try:
- response = self.compute_client.virtual_machine_scale_set_extensions.get(self.resource_group, self.vmss_name, self.name)
- return response.as_dict()
- except CloudError as e:
- self.log('Did not find VMSS extension')
- return False
-
-
-def main():
- AzureRMVMSSExtension()
-
-
-if __name__ == '__main__':
- main()
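The exec_module flow above only calls create_or_update when a mutable property (settings, protected_settings, type_handler_version, auto_upgrade_minor_version) differs from the deployed extension, and it warns rather than updates when publisher or type would change. The helper below is a minimal standalone sketch of that drift check using plain dicts keyed like the module's response; it is illustrative only and is not the module's actual update path, which mutates the SDK response directly.

    def extension_needs_update(existing, desired):
        """Return True when a mutable field differs between the deployed and desired state."""
        if existing is None:
            return True  # extension does not exist yet
        for key in ('settings', 'protected_settings', 'type_handler_version',
                    'auto_upgrade_minor_version'):
            if desired.get(key) is not None and desired.get(key) != existing.get(key):
                return True
        return False

    existing = {'publisher': 'Microsoft.Azure.Extensions', 'type': 'CustomScript',
                'type_handler_version': '2.0', 'settings': {'commandToExecute': 'hostname'}}
    desired = dict(existing, settings={'commandToExecute': 'uptime'})
    # extension_needs_update(existing, desired) -> True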
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetextension_info.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetextension_info.py
deleted file mode 100644
index a5e11d146e..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetextension_info.py
+++ /dev/null
@@ -1,228 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_virtualmachinescalesetextension_info
-version_added: "2.9"
-short_description: Get Azure Virtual Machine Scale Set Extension facts
-description:
- - Get facts of Azure Virtual Machine Scale Set Extension.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- vmss_name:
- description:
- - The name of VMSS containing the extension.
- required: True
- name:
- description:
- - The name of the virtual machine scale set extension.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Get information on specific Virtual Machine Scale Set Extension
- azure_rm_virtualmachinescalesetextension_info:
- resource_group: myResourceGroup
- vmss_name: myvmss
- name: myextension
-
- - name: List installed Virtual Machine Scale Set Extensions
- azure_rm_virtualmachinescalesetextension_info:
- resource_group: myrg
- vmss_name: myvmss
-'''
-
-RETURN = '''
-extensions:
- description:
- - A list of dictionaries containing facts for Virtual Machine Scale Set Extension.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScaleSets/
- myvmss/extensions/myextension"
- resource_group:
- description:
- - Resource group name.
- returned: always
- type: str
- sample: myrg
- vmss_name:
- description:
- - Virtual machine scale set name.
- returned: always
- type: str
- sample: myvmss
- name:
- description:
- - Virtual machine extension name.
- returned: always
- type: str
- sample: myextension
- publisher:
- description:
- - Extension publisher.
- returned: always
- type: str
- sample: Microsoft.Azure.Extensions
- type:
- description:
- - Extension type.
- returned: always
- type: str
- sample: CustomScript
- settings:
- description:
- - Extension specific settings dictionary.
- returned: always
- type: dict
- sample: { 'commandToExecute':'hostname' }
- auto_upgrade_minor_version:
- description:
- - Autoupgrade minor version flag.
- returned: always
- type: bool
- sample: true
- provisioning_state:
- description:
- - Provisioning state of the extension.
- returned: always
- type: str
- sample: Succeeded
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMVirtualMachineScaleSetExtensionInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- vmss_name=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.resource_group = None
- self.vmss_name = None
- self.name = None
- super(AzureRMVirtualMachineScaleSetExtensionInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_virtualmachinescalesetextension_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_virtualmachinescalesetextension_facts' module has been renamed to" +
- " 'azure_rm_virtualmachinescalesetextension_info'",
- version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name is not None:
- self.results['extensions'] = self.get_extensions()
- else:
- self.results['extensions'] = self.list_extensions()
-
- return self.results
-
- def get_extensions(self):
- response = None
- results = []
- try:
- response = self.compute_client.virtual_machine_scale_set_extensions.get(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- vmss_extension_name=self.name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Virtual Machine Extension.')
-
- if response:
- results.append(self.format_response(response))
-
- return results
-
- def list_extensions(self):
- response = None
- results = []
- try:
- response = self.compute_client.virtual_machine_scale_set_extensions.list(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Virtual Machine Extension.')
-
- if response is not None:
- for item in response:
- results.append(self.format_response(item))
-
- return results
-
- def format_response(self, item):
- id_template = "/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/virtualMachineScaleSets/{2}/extensions/{3}"
- d = item.as_dict()
- d = {
- 'id': id_template.format(self.subscription_id, self.resource_group, self.vmss_name, d.get('name')),
- 'resource_group': self.resource_group,
- 'vmss_name': self.vmss_name,
- 'name': d.get('name'),
- 'publisher': d.get('publisher'),
- 'type': d.get('type'),
- 'settings': d.get('settings'),
- 'auto_upgrade_minor_version': d.get('auto_upgrade_minor_version'),
- 'provisioning_state': d.get('provisioning_state')
- }
- return d
-
-
-def main():
- AzureRMVirtualMachineScaleSetExtensionInfo()
-
-
-if __name__ == '__main__':
- main()
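format_response above flattens each SDK extension object into a small dict and reconstructs the resource ID from its parts. A minimal standalone sketch of that ID construction follows, with placeholder subscription and resource names rather than real values.

    id_template = ("/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute"
                   "/virtualMachineScaleSets/{2}/extensions/{3}")

    extension_id = id_template.format(
        "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",  # subscription_id
        "myResourceGroup",                       # resource_group
        "myvmss",                                # vmss_name
        "myextension",                           # extension name, i.e. d.get('name')
    )
    # extension_id ->
    # /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/
    #     providers/Microsoft.Compute/virtualMachineScaleSets/myvmss/extensions/myextension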
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetinstance.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetinstance.py
deleted file mode 100644
index 6f4a6f5c39..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetinstance.py
+++ /dev/null
@@ -1,325 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_virtualmachinescalesetinstance
-version_added: "2.8"
-short_description: Manage Azure Virtual Machine Scale Set (VMSS) instances
-description:
- - Manage Azure Virtual Machine Scale Set VMs, including power state, upgrading to the latest model, scale-in protection and deletion.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- vmss_name:
- description:
- - The name of the VM scale set.
- required: True
- instance_id:
- description:
- - The instance ID of the virtual machine.
- required: True
- latest_model:
- type: bool
- description:
- - Set to C(yes) to upgrade to the latest model.
- power_state:
- description:
- - Use this option to change the power state of the instance.
- choices:
- - 'running'
- - 'stopped'
- - 'deallocated'
- protect_from_scale_in:
- type: bool
- description:
- - Turn instance protection from scale-in on or off.
- version_added: "2.10"
- protect_from_scale_set_actions:
- type: bool
- description:
- - Turn instance protection from scale set actions on or off.
- version_added: "2.10"
- state:
- description:
- - State of the VMSS instance. Use C(present) to update an instance and C(absent) to delete an instance.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: Upgrade instance to the latest image
- azure_rm_virtualmachinescalesetinstance:
- resource_group: myResourceGroup
- vmss_name: myVMSS
- instance_id: "2"
- latest_model: yes
-
- - name: Turn on protect from scale in
- azure_rm_virtualmachinescalesetinstance:
- resource_group: myResourceGroup
- vmss_name: myVMSS
- instance_id: "2"
- protect_from_scale_in: true
-'''
-
-RETURN = '''
-instances:
- description:
- - A list of instances.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Instance resource ID.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/TestGroup/providers/Microsoft.Compute/scalesets/myscaleset/vms/myvm
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.compute import ComputeManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMVirtualMachineScaleSetInstance(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- vmss_name=dict(
- type='str',
- required=True
- ),
- instance_id=dict(
- type='str'
- ),
- latest_model=dict(
- type='bool'
- ),
- power_state=dict(
- type='str',
- choices=['running', 'stopped', 'deallocated']
- ),
- protect_from_scale_in=dict(
- type='bool'
- ),
- protect_from_scale_set_actions=dict(
- type='bool'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.vmss_name = None
- self.instance_id = None
- self.latest_model = None
- self.power_state = None
- self.state = None
- self.protect_from_scale_in = None
- self.protect_from_scale_set_actions = None
- super(AzureRMVirtualMachineScaleSetInstance, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(ComputeManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager,
- api_version='2019-07-01')
-
- instances = self.get()
-
- if self.state == 'absent':
- for item in instances:
- if not self.check_mode:
- self.delete(item['instance_id'])
- self.results['changed'] = True
- self.results['instances'] = []
- else:
- if self.latest_model is not None:
- for item in instances:
- if not item.get('latest_model', None):
- if not self.check_mode:
- self.apply_latest_model(item['instance_id'])
- item['latest_model'] = True
- self.results['changed'] = True
-
- if self.power_state is not None:
- for item in instances:
- if self.power_state == 'stopped' and item['power_state'] not in ['stopped', 'stopping']:
- if not self.check_mode:
- self.stop(item['instance_id'])
- self.results['changed'] = True
- elif self.power_state == 'deallocated' and item['power_state'] not in ['deallocated']:
- if not self.check_mode:
- self.deallocate(item['instance_id'])
- self.results['changed'] = True
- elif self.power_state == 'running' and item['power_state'] not in ['running']:
- if not self.check_mode:
- self.start(item['instance_id'])
- self.results['changed'] = True
- if self.protect_from_scale_in is not None or self.protect_from_scale_set_actions is not None:
- for item in instances:
- protection_policy = item['protection_policy']
- if protection_policy is None or self.protect_from_scale_in != protection_policy['protect_from_scale_in'] or \
- self.protect_from_scale_set_actions != protection_policy['protect_from_scale_set_actions']:
- if not self.check_mode:
- self.update_protection_policy(self.instance_id, self.protect_from_scale_in, self.protect_from_scale_set_actions)
- self.results['changed'] = True
-
- self.results['instances'] = [{'id': item['id']} for item in instances]
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.virtual_machine_scale_set_vms.get(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- instance_id=self.instance_id)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Virtual Machine Scale Set VM.')
-
- if response:
- results.append(self.format_response(response))
-
- return results
-
- def apply_latest_model(self, instance_id):
- try:
- poller = self.compute_client.virtual_machine_scale_sets.update_instances(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- instance_ids=[instance_id])
- self.get_poller_result(poller)
- except CloudError as exc:
- self.log("Error applying latest model {0} - {1}".format(self.vmss_name, str(exc)))
- self.fail("Error applying latest model {0} - {1}".format(self.vmss_name, str(exc)))
-
- def delete(self, instance_id):
- try:
- self.mgmt_client.virtual_machine_scale_set_vms.delete(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- instance_id=instance_id)
- except CloudError as e:
- self.log('Could not delete instance of Virtual Machine Scale Set VM.')
- self.fail('Could not delete instance of Virtual Machine Scale Set VM.')
-
- def start(self, instance_id):
- try:
- self.mgmt_client.virtual_machine_scale_set_vms.start(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- instance_id=instance_id)
- except CloudError as e:
- self.log('Could not start instance of Virtual Machine Scale Set VM.')
- self.fail('Could not start instance of Virtual Machine Scale Set VM.')
-
- def stop(self, instance_id):
- try:
- self.mgmt_client.virtual_machine_scale_set_vms.power_off(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- instance_id=instance_id)
- except CloudError as e:
- self.log('Could not stop instance of Virtual Machine Scale Set VM.')
- self.fail('Could not stop instance of Virtual Machine Scale Set VM.')
-
- def deallocate(self, instance_id):
- try:
- self.mgmt_client.virtual_machine_scale_set_vms.deallocate(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- instance_id=instance_id)
- except CloudError as e:
- self.log('Could not deallocate instance of Virtual Machine Scale Set VM.')
- self.fail('Could not deallocate instance of Virtual Machine Scale Set VM.')
-
- def update_protection_policy(self, instance_id, protect_from_scale_in, protect_from_scale_set_actions):
- try:
- d = {}
- if protect_from_scale_in is not None:
- d['protect_from_scale_in'] = protect_from_scale_in
- if protect_from_scale_set_actions is not None:
- d['protect_from_scale_set_actions'] = protect_from_scale_set_actions
- protection_policy = self.compute_models.VirtualMachineScaleSetVMProtectionPolicy(**d)
- instance = self.mgmt_client.virtual_machine_scale_set_vms.get(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- instance_id=instance_id)
- instance.protection_policy = protection_policy
- poller = self.mgmt_client.virtual_machine_scale_set_vms.update(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- instance_id=instance_id,
- parameters=instance)
- self.get_poller_result(poller)
- except CloudError as e:
- self.log('Could not update instance protection policy.')
- self.fail('Could not update instance protection policy.')
-
- def format_response(self, item):
- d = item.as_dict()
- iv = self.mgmt_client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- instance_id=d.get('instance_id', None)).as_dict()
- power_state = ""
- for index in range(len(iv['statuses'])):
- code = iv['statuses'][index]['code'].split('/')
- if code[0] == 'PowerState':
- power_state = code[1]
- break
- d = {
- 'id': d.get('id'),
- 'tags': d.get('tags'),
- 'instance_id': d.get('instance_id'),
- 'latest_model': d.get('latest_model_applied'),
- 'power_state': power_state,
- 'protection_policy': d.get('protection_policy')
- }
- return d
-
-
-def main():
- AzureRMVirtualMachineScaleSetInstance()
-
-
-if __name__ == '__main__':
- main()
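
For reference, the EXAMPLES block above only exercises latest_model and protect_from_scale_in; below is a minimal task sketch for the other documented options, power_state and state, reusing the same placeholder resource names (values are illustrative only):

    - name: Deallocate an instance of the scale set
      azure_rm_virtualmachinescalesetinstance:
        resource_group: myResourceGroup
        vmss_name: myVMSS
        instance_id: "2"
        power_state: deallocated

    - name: Remove an instance from the scale set
      azure_rm_virtualmachinescalesetinstance:
        resource_group: myResourceGroup
        vmss_name: myVMSS
        instance_id: "2"
        state: absent
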
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetinstance_info.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetinstance_info.py
deleted file mode 100644
index 302bc7d97b..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetinstance_info.py
+++ /dev/null
@@ -1,243 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Zim Kalinowski, <zikalino@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_virtualmachinescalesetinstance_info
-version_added: "2.9"
-short_description: Get Azure Virtual Machine Scale Set Instance facts
-description:
- - Get facts of Azure Virtual Machine Scale Set VMs.
-
-options:
- resource_group:
- description:
- - The name of the resource group.
- required: True
- vmss_name:
- description:
- - The name of the VM scale set.
- required: True
- instance_id:
- description:
- - The instance ID of the virtual machine.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Zim Kalinowski (@zikalino)
-
-'''
-
-EXAMPLES = '''
- - name: List VM instances in Virtual Machine ScaleSet
- azure_rm_virtualmachinescalesetinstance_info:
- resource_group: myResourceGroup
- vmss_name: myVMSS
-'''
-
-RETURN = '''
-instances:
- description:
- - A list of dictionaries containing facts for Virtual Machine Scale Set VM.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/virtualMachineScaleSets/my
- VMSS/virtualMachines/2"
- tags:
- description:
- - Resource tags.
- returned: always
- type: dict
- sample: { 'tag1': 'abc' }
- instance_id:
- description:
- - Virtual Machine instance ID.
- returned: always
- type: str
- sample: 0
- name:
- description:
- - Virtual Machine name.
- returned: always
- type: str
- sample: myVMSS_2
- latest_model:
- description:
-                - Whether the latest model has been applied.
- returned: always
- type: bool
- sample: True
- provisioning_state:
- description:
- - Provisioning state of the Virtual Machine.
- returned: always
- type: str
- sample: Succeeded
- power_state:
- description:
-                - Power state of the Virtual Machine.
- returned: always
- type: str
- sample: running
- vm_id:
- description:
-                - Virtual Machine ID.
- returned: always
- type: str
- sample: 94a141a9-4530-46ac-b151-2c7ff09aa823
- image_reference:
- description:
-                - Image reference.
- returned: always
- type: dict
- sample: { "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myGroup/providers/Microsoft.Compute/galleries/
- myGallery/images/myImage/versions/10.1.3"}
-'''
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from azure.mgmt.compute import ComputeManagementClient
- from msrest.serialization import Model
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-
-class AzureRMVirtualMachineScaleSetVMInfo(AzureRMModuleBase):
- def __init__(self):
- # define user inputs into argument
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- vmss_name=dict(
- type='str',
- required=True
- ),
- instance_id=dict(
- type='str'
- ),
- tags=dict(
- type='list'
- )
- )
- # store the results of the module operation
- self.results = dict(
- changed=False
- )
- self.mgmt_client = None
- self.resource_group = None
- self.vmss_name = None
- self.instance_id = None
- self.tags = None
- super(AzureRMVirtualMachineScaleSetVMInfo, self).__init__(self.module_arg_spec, supports_tags=False)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_virtualmachinescalesetinstance_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_virtualmachinescalesetinstance_facts' module has been renamed to" +
- " 'azure_rm_virtualmachinescalesetinstance_info'",
- version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
- self.mgmt_client = self.get_mgmt_svc_client(ComputeManagementClient,
- base_url=self._cloud_environment.endpoints.resource_manager)
-
- if (self.instance_id is None):
- self.results['instances'] = self.list()
- else:
- self.results['instances'] = self.get()
- return self.results
-
- def get(self):
- response = None
- results = []
- try:
- response = self.mgmt_client.virtual_machine_scale_set_vms.get(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- instance_id=self.instance_id)
- self.log("Response : {0}".format(response))
- except CloudError as e:
- self.log('Could not get facts for Virtual Machine Scale Set VM.')
-
- if response and self.has_tags(response.tags, self.tags):
- results.append(self.format_response(response))
-
- return results
-
- def list(self):
- items = None
- try:
- items = self.mgmt_client.virtual_machine_scale_set_vms.list(resource_group_name=self.resource_group,
- virtual_machine_scale_set_name=self.vmss_name)
- self.log("Response : {0}".format(items))
- except CloudError as e:
- self.log('Could not get facts for Virtual Machine ScaleSet VM.')
-
- results = []
- for item in items:
- if self.has_tags(item.tags, self.tags):
- results.append(self.format_response(item))
- return results
-
- def format_response(self, item):
- d = item.as_dict()
-
- iv = self.mgmt_client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name=self.resource_group,
- vm_scale_set_name=self.vmss_name,
- instance_id=d.get('instance_id', None)).as_dict()
- power_state = ""
- for index in range(len(iv['statuses'])):
- code = iv['statuses'][index]['code'].split('/')
- if code[0] == 'PowerState':
- power_state = code[1]
- break
- d = {
- 'resource_group': self.resource_group,
- 'id': d.get('id', None),
- 'tags': d.get('tags', None),
- 'instance_id': d.get('instance_id', None),
- 'latest_model': d.get('latest_model_applied', None),
- 'name': d.get('name', None),
- 'provisioning_state': d.get('provisioning_state', None),
- 'power_state': power_state,
- 'vm_id': d.get('vm_id', None),
- 'image_reference': d.get('storage_profile').get('image_reference', None),
- 'computer_name': d.get('os_profile').get('computer_name', None)
- }
- return d
-
-
-def main():
- AzureRMVirtualMachineScaleSetVMInfo()
-
-
-if __name__ == '__main__':
- main()
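
The single example above lists every instance in the scale set; below is a minimal sketch combining the documented instance_id and tags filters (placeholder names, and the tag value is an assumption):

    - name: Get a single scale set instance, limited by tag
      azure_rm_virtualmachinescalesetinstance_info:
        resource_group: myResourceGroup
        vmss_name: myVMSS
        instance_id: "0"
        tags:
          - testing
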
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py
deleted file mode 100644
index 9515b2a389..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py
+++ /dev/null
@@ -1,394 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_virtualnetwork
-version_added: "2.1"
-short_description: Manage Azure virtual networks
-description:
-    - Create, update or delete a virtual network. Allows setting and updating the available IPv4 address ranges
- and setting custom DNS servers. Use the M(azure_rm_subnet) module to associate subnets with a virtual network.
-options:
- resource_group:
- description:
- - Name of resource group.
- required: true
- address_prefixes_cidr:
- description:
- - List of IPv4 address ranges where each is formatted using CIDR notation.
- - Required when creating a new virtual network or using I(purge_address_prefixes).
- aliases:
- - address_prefixes
- dns_servers:
- description:
- - Custom list of DNS servers. Maximum length of two.
- - The first server in the list will be treated as the Primary server. This is an explicit list.
- - Existing DNS servers will be replaced with the specified list.
- - Use the I(purge_dns_servers) option to remove all custom DNS servers and revert to default Azure servers.
- location:
- description:
- - Valid Azure location. Defaults to location of the resource group.
- name:
- description:
- - Name of the virtual network.
- required: true
- purge_address_prefixes:
- description:
- - Use with I(state=present) to remove any existing I(address_prefixes).
- type: bool
- default: 'no'
- aliases:
- - purge
- purge_dns_servers:
- description:
- - Use with I(state=present) to remove existing DNS servers, reverting to default Azure servers. Mutually exclusive with DNS servers.
- type: bool
- default: 'no'
- state:
- description:
- - State of the virtual network. Use C(present) to create or update and C(absent) to delete.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Chris Houseknecht (@chouseknecht)
- - Matt Davis (@nitzmahone)
-
-'''
-
-EXAMPLES = '''
- - name: Create a virtual network
- azure_rm_virtualnetwork:
- resource_group: myResourceGroup
- name: myVirtualNetwork
- address_prefixes_cidr:
- - "10.1.0.0/16"
- - "172.100.0.0/16"
- dns_servers:
- - "127.0.0.1"
- - "127.0.0.2"
- tags:
- testing: testing
- delete: on-exit
-
- - name: Delete a virtual network
- azure_rm_virtualnetwork:
- resource_group: myResourceGroup
- name: myVirtualNetwork
- state: absent
-'''
-RETURN = '''
-state:
- description:
- - Current state of the virtual network.
- returned: always
- type: complex
- contains:
- address_prefixes:
- description:
- - The virtual network IPv4 address ranges.
- returned: always
- type: list
- sample: [
- "10.1.0.0/16",
- "172.100.0.0/16"
- ]
- dns_servers:
- description:
- - DNS servers.
- returned: always
- type: list
- sample: [
- "127.0.0.1",
- "127.0.0.3"
- ]
- etag:
- description:
-                - A unique read-only string that changes whenever the resource is updated.
- returned: always
- type: str
- sample: 'W/"0712e87c-f02f-4bb3-8b9e-2da0390a3886"'
- id:
- description:
- - Resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/
- Microsoft.Network/virtualNetworks/myVirtualNetwork"
- location:
- description:
- - The Geo-location where the resource lives.
- returned: always
- type: str
- sample: eastus
- name:
- description:
- - Resource name.
- returned: always
- type: str
- sample: my_test_network
- provisioning_state:
- description:
- - Provisioning state of the virtual network.
- returned: always
- type: str
- sample: Succeeded
- tags:
- description:
- - Resource tags, such as { 'tags1':'value1' }
- returned: always
- type: dict
- sample: { 'key1':'value1' }
- type:
- description:
- - Resource type.
- returned: always
- type: str
- sample: Microsoft.Network/virtualNetworks
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, CIDR_PATTERN
-
-
-def virtual_network_to_dict(vnet):
- '''
- Convert a virtual network object to a dict.
- :param vnet: VirtualNet object
- :return: dict
- '''
- results = dict(
- id=vnet.id,
- name=vnet.name,
- location=vnet.location,
- type=vnet.type,
- tags=vnet.tags,
- provisioning_state=vnet.provisioning_state,
- etag=vnet.etag
- )
- if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
- results['dns_servers'] = []
- for server in vnet.dhcp_options.dns_servers:
- results['dns_servers'].append(server)
- if vnet.address_space and len(vnet.address_space.address_prefixes) > 0:
- results['address_prefixes'] = []
- for space in vnet.address_space.address_prefixes:
- results['address_prefixes'].append(space)
- return results
-
-
-class AzureRMVirtualNetwork(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- location=dict(type='str'),
- address_prefixes_cidr=dict(type='list', aliases=['address_prefixes']),
- dns_servers=dict(type='list',),
- purge_address_prefixes=dict(type='bool', default=False, aliases=['purge']),
- purge_dns_servers=dict(type='bool', default=False),
- )
-
- mutually_exclusive = [
- ('dns_servers', 'purge_dns_servers')
- ]
-
- required_if = [
- ('purge_address_prefixes', True, ['address_prefixes_cidr'])
- ]
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.location = None
- self.address_prefixes_cidr = None
- self.purge_address_prefixes = None
- self.dns_servers = None
- self.purge_dns_servers = None
-
- self.results = dict(
- changed=False,
- state=dict()
- )
-
- super(AzureRMVirtualNetwork, self).__init__(self.module_arg_spec,
- mutually_exclusive=mutually_exclusive,
- required_if=required_if,
- supports_check_mode=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- self.results['check_mode'] = self.check_mode
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- # Set default location
- self.location = resource_group.location
-
- if self.state == 'present' and self.purge_address_prefixes:
- for prefix in self.address_prefixes_cidr:
- if not CIDR_PATTERN.match(prefix):
- self.fail("Parameter error: invalid address prefix value {0}".format(prefix))
-
- if self.dns_servers and len(self.dns_servers) > 2:
- self.fail("Parameter error: You can provide a maximum of 2 DNS servers.")
-
- changed = False
- results = dict()
-
- try:
- self.log('Fetching vnet {0}'.format(self.name))
- vnet = self.network_client.virtual_networks.get(self.resource_group, self.name)
-
- results = virtual_network_to_dict(vnet)
- self.log('Vnet exists {0}'.format(self.name))
- self.log(results, pretty_print=True)
- self.check_provisioning_state(vnet, self.state)
-
- if self.state == 'present':
- if self.address_prefixes_cidr:
- existing_address_prefix_set = set(vnet.address_space.address_prefixes)
- requested_address_prefix_set = set(self.address_prefixes_cidr)
- missing_prefixes = requested_address_prefix_set - existing_address_prefix_set
- extra_prefixes = existing_address_prefix_set - requested_address_prefix_set
- if len(missing_prefixes) > 0:
- self.log('CHANGED: there are missing address_prefixes')
- changed = True
- if not self.purge_address_prefixes:
- # add the missing prefixes
- for prefix in missing_prefixes:
- results['address_prefixes'].append(prefix)
-
- if len(extra_prefixes) > 0 and self.purge_address_prefixes:
- self.log('CHANGED: there are address_prefixes to purge')
- changed = True
- # replace existing address prefixes with requested set
- results['address_prefixes'] = self.address_prefixes_cidr
-
- update_tags, results['tags'] = self.update_tags(results['tags'])
- if update_tags:
- changed = True
-
- if self.dns_servers:
- existing_dns_set = set(vnet.dhcp_options.dns_servers) if vnet.dhcp_options else set([])
- requested_dns_set = set(self.dns_servers)
- if existing_dns_set != requested_dns_set:
- self.log('CHANGED: replacing DNS servers')
- changed = True
- results['dns_servers'] = self.dns_servers
-
- if self.purge_dns_servers and vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
- self.log('CHANGED: purging existing DNS servers')
- changed = True
- results['dns_servers'] = []
- elif self.state == 'absent':
- self.log("CHANGED: vnet exists but requested state is 'absent'")
- changed = True
- except CloudError:
- self.log('Vnet {0} does not exist'.format(self.name))
- if self.state == 'present':
- self.log("CHANGED: vnet {0} does not exist but requested state is 'present'".format(self.name))
- changed = True
-
- self.results['changed'] = changed
- self.results['state'] = results
-
- if self.check_mode:
- return self.results
-
- if changed:
- if self.state == 'present':
- if not results:
- # create a new virtual network
- self.log("Create virtual network {0}".format(self.name))
- if not self.address_prefixes_cidr:
- self.fail('Parameter error: address_prefixes_cidr required when creating a virtual network')
- vnet_param = self.network_models.VirtualNetwork(
- location=self.location,
- address_space=self.network_models.AddressSpace(
- address_prefixes=self.address_prefixes_cidr
- )
- )
- if self.dns_servers:
- vnet_param.dhcp_options = self.network_models.DhcpOptions(
- dns_servers=self.dns_servers
- )
- if self.tags:
- vnet_param.tags = self.tags
- self.results['state'] = self.create_or_update_vnet(vnet_param)
- else:
- # update existing virtual network
- self.log("Update virtual network {0}".format(self.name))
- vnet_param = self.network_models.VirtualNetwork(
- location=results['location'],
- address_space=self.network_models.AddressSpace(
- address_prefixes=results['address_prefixes']
- ),
- tags=results['tags'],
- subnets=vnet.subnets
- )
- if results.get('dns_servers'):
- vnet_param.dhcp_options = self.network_models.DhcpOptions(
- dns_servers=results['dns_servers']
- )
- self.results['state'] = self.create_or_update_vnet(vnet_param)
- elif self.state == 'absent':
- self.delete_virtual_network()
- self.results['state']['status'] = 'Deleted'
-
- return self.results
-
- def create_or_update_vnet(self, vnet):
- try:
- poller = self.network_client.virtual_networks.create_or_update(self.resource_group, self.name, vnet)
- new_vnet = self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error creating or updating virtual network {0} - {1}".format(self.name, str(exc)))
- return virtual_network_to_dict(new_vnet)
-
- def delete_virtual_network(self):
- try:
- poller = self.network_client.virtual_networks.delete(self.resource_group, self.name)
- result = self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error deleting virtual network {0} - {1}".format(self.name, str(exc)))
- return result
-
-
-def main():
- AzureRMVirtualNetwork()
-
-
-if __name__ == '__main__':
- main()
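
A minimal sketch of the purge behaviour documented above (purge_address_prefixes requires address_prefixes_cidr; purge_dns_servers reverts to the default Azure servers); names and ranges are illustrative:

    - name: Replace the address space and drop custom DNS servers
      azure_rm_virtualnetwork:
        resource_group: myResourceGroup
        name: myVirtualNetwork
        address_prefixes_cidr:
          - "10.1.0.0/16"
        purge_address_prefixes: yes
        purge_dns_servers: yes
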
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_info.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_info.py
deleted file mode 100644
index d52ff1bfe5..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_info.py
+++ /dev/null
@@ -1,338 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Chris Houseknecht, <house@redhat.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_virtualnetwork_info
-
-version_added: "2.9"
-
-short_description: Get virtual network facts
-
-description:
- - Get facts for a specific virtual network or all virtual networks within a resource group.
-
-options:
- name:
- description:
-            - Only show results for a specific virtual network.
- resource_group:
- description:
- - Limit results by resource group. Required when filtering by name.
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Chris Houseknecht (@chouseknecht)
- - Matt Davis (@nitzmahone)
-
-'''
-
-EXAMPLES = '''
- - name: Get facts for one virtual network
- azure_rm_virtualnetwork_info:
- resource_group: myResourceGroup
- name: secgroup001
-
- - name: Get facts for all virtual networks
- azure_rm_virtualnetwork_info:
- resource_group: myResourceGroup
-
- - name: Get facts by tags
- azure_rm_virtualnetwork_info:
- tags:
- - testing
-'''
-RETURN = '''
-azure_virtualnetworks:
- description:
- - List of virtual network dicts.
- returned: always
- type: list
- example: [{
- "etag": 'W/"532ba1be-ae71-40f2-9232-3b1d9cf5e37e"',
- "id": "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet2001",
- "location": "eastus2",
- "name": "vnet2001",
- "properties": {
- "addressSpace": {
- "addressPrefixes": [
- "10.10.0.0/16"
- ]
- },
- "provisioningState": "Succeeded",
- "resourceGuid": "a7ba285f-f7e7-4e17-992a-de4d39f28612",
- "subnets": []
- },
- "type": "Microsoft.Network/virtualNetworks"
- }]
-virtualnetworks:
- description:
-        - List of virtual network dicts with the same format as M(azure_rm_virtualnetwork) module parameters.
- returned: always
- type: complex
- contains:
- id:
- description:
- - Resource ID of the virtual network.
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/vnet2001
- returned: always
- type: str
- address_prefixes:
- description:
- - List of IPv4 address ranges where each is formatted using CIDR notation.
- sample: ["10.10.0.0/16"]
- returned: always
- type: list
- dns_servers:
- description:
- - Custom list of DNS servers.
- returned: always
- type: list
- sample: ["www.azure.com"]
- location:
- description:
- - Valid Azure location.
- returned: always
- type: str
- sample: eastus
- tags:
- description:
- - Tags assigned to the resource. Dictionary of string:string pairs.
- returned: always
- type: dict
- sample: { "tag1": "abc" }
- provisioning_state:
- description:
- - Provisioning state of the resource.
- returned: always
- sample: Succeeded
- type: str
- name:
- description:
- - Name of the virtual network.
- returned: always
- type: str
- sample: foo
- subnets:
- description:
- - Subnets associated with the virtual network.
- returned: always
- type: list
- contains:
- id:
- description:
- - Resource ID of the subnet.
- returned: always
- type: str
- sample: "/subscriptions/f64d4ee8-be94-457d-ba26-3fa6b6506cef/resourceGroups/v-xisuRG/providers/
- Microsoft.Network/virtualNetworks/vnetb57dc95232/subnets/vnetb57dc95232"
- name:
- description:
- - Name of the subnet.
- returned: always
- type: str
- sample: vnetb57dc95232
- provisioning_state:
- description:
- - Provisioning state of the subnet.
- returned: always
- type: str
- sample: Succeeded
- address_prefix:
- description:
- - The address prefix for the subnet.
- returned: always
- type: str
- sample: '10.1.0.0/16'
- network_security_group:
- description:
- - Existing security group ID with which to associate the subnet.
- returned: always
- type: str
- sample: null
- route_table:
- description:
- - The reference of the RouteTable resource.
- returned: always
- type: str
- sample: null
- service_endpoints:
- description:
- - An array of service endpoints.
- returned: always
- type: list
- sample: [
- {
- "locations": [
- "southeastasia",
- "eastasia"
- ],
- "service": "Microsoft.Storage"
- }
- ]
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-
-AZURE_OBJECT_CLASS = 'VirtualNetwork'
-
-
-class AzureRMNetworkInterfaceInfo(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str'),
- resource_group=dict(type='str'),
- tags=dict(type='list'),
- )
-
- self.results = dict(
- changed=False,
- virtualnetworks=[]
- )
-
- self.name = None
- self.resource_group = None
- self.tags = None
-
- super(AzureRMNetworkInterfaceInfo, self).__init__(self.module_arg_spec,
- supports_tags=False,
- facts_module=True)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_virtualnetwork_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_virtualnetwork_facts' module has been renamed to 'azure_rm_virtualnetwork_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name is not None:
- results = self.get_item()
- elif self.resource_group is not None:
- results = self.list_resource_group()
- else:
- results = self.list_items()
-
- if is_old_facts:
- self.results['ansible_facts'] = {
- 'azure_virtualnetworks': self.serialize(results)
- }
- self.results['virtualnetworks'] = self.curated(results)
-
- return self.results
-
- def get_item(self):
- self.log('Get properties for {0}'.format(self.name))
- item = None
- results = []
-
- try:
- item = self.network_client.virtual_networks.get(self.resource_group, self.name)
- except CloudError:
- pass
-
- if item and self.has_tags(item.tags, self.tags):
- results = [item]
- return results
-
- def list_resource_group(self):
- self.log('List items for resource group')
- try:
- response = self.network_client.virtual_networks.list(self.resource_group)
- except CloudError as exc:
- self.fail("Failed to list for resource group {0} - {1}".format(self.resource_group, str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(item)
- return results
-
- def list_items(self):
- self.log('List all for items')
- try:
- response = self.network_client.virtual_networks.list_all()
- except CloudError as exc:
- self.fail("Failed to list all items - {0}".format(str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- results.append(item)
- return results
-
- def serialize(self, raws):
- self.log("Serialize all items")
- return [self.serialize_obj(item, AZURE_OBJECT_CLASS) for item in raws] if raws else []
-
- def curated(self, raws):
- self.log("Format all items")
- return [self.virtualnetwork_to_dict(x) for x in raws] if raws else []
-
- def virtualnetwork_to_dict(self, vnet):
- results = dict(
- id=vnet.id,
- name=vnet.name,
- location=vnet.location,
- tags=vnet.tags,
- provisioning_state=vnet.provisioning_state
- )
- if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
- results['dns_servers'] = []
- for server in vnet.dhcp_options.dns_servers:
- results['dns_servers'].append(server)
- if vnet.address_space and len(vnet.address_space.address_prefixes) > 0:
- results['address_prefixes'] = []
- for space in vnet.address_space.address_prefixes:
- results['address_prefixes'].append(space)
- if vnet.subnets and len(vnet.subnets) > 0:
- results['subnets'] = [self.subnet_to_dict(x) for x in vnet.subnets]
- return results
-
- def subnet_to_dict(self, subnet):
- result = dict(
- id=subnet.id,
- name=subnet.name,
- provisioning_state=subnet.provisioning_state,
- address_prefix=subnet.address_prefix,
- network_security_group=subnet.network_security_group.id if subnet.network_security_group else None,
- route_table=subnet.route_table.id if subnet.route_table else None
- )
- if subnet.service_endpoints:
- result['service_endpoints'] = [{'service': item.service, 'locations': item.locations} for item in subnet.service_endpoints]
- return result
-
-
-def main():
- AzureRMNetworkInterfaceInfo()
-
-
-if __name__ == '__main__':
- main()
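
A minimal sketch of consuming the curated virtualnetworks return value documented above; the registered variable name is arbitrary:

    - name: Get facts for one virtual network
      azure_rm_virtualnetwork_info:
        resource_group: myResourceGroup
        name: myVirtualNetwork
      register: vnet_info

    - name: Show the address prefixes reported for that network
      debug:
        var: vnet_info.virtualnetworks[0].address_prefixes
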
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkgateway.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkgateway.py
deleted file mode 100644
index 87183d0127..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkgateway.py
+++ /dev/null
@@ -1,383 +0,0 @@
-#!/usr/bin/python
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'certified'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_virtualnetworkgateway
-
-version_added: "2.8"
-
-short_description: Manage Azure virtual network gateways
-
-description:
-    - Create, update or delete a virtual network gateway (VPN Gateway).
- - When creating a VPN Gateway you must provide the name of an existing virtual network.
-
-options:
- resource_group:
- description:
- - Name of a resource group where VPN Gateway exists or will be created.
- required: true
- name:
- description:
- - Name of VPN Gateway.
- required: true
- state:
- description:
- - State of the VPN Gateway. Use C(present) to create or update VPN gateway and C(absent) to delete VPN gateway.
- default: present
- choices:
- - absent
- - present
- required: false
- location:
- description:
- - Valid Azure location. Defaults to location of the resource group.
- required: false
- virtual_network:
- description:
- - An existing virtual network with which the VPN Gateway will be associated.
- - Required when creating a VPN Gateway.
- - Can be the name of the virtual network.
- - Must be in the same resource group as VPN gateway when specified by name.
- - Can be the resource ID of the virtual network.
- - Can be a dict which contains I(name) and I(resource_group) of the virtual network.
- aliases:
- - virtual_network_name
- required: true
- ip_configurations:
- description:
- - List of IP configurations.
- suboptions:
- name:
- description:
- - Name of the IP configuration.
- required: true
- private_ip_allocation_method:
- description:
- - Private IP allocation method.
- choices:
- - dynamic
- - static
- default: dynamic
- public_ip_address_name:
- description:
- - Name of the public IP address. Use 'None' to disable the public IP address.
- subnet:
- description:
- - ID of the gateway subnet for VPN.
- default: GatewaySubnet
- gateway_type:
- description:
- - The type of this virtual network gateway.
- default: vpn
- choices:
- - vpn
- - express_route
- vpn_type:
- description:
- - The type of this virtual private network.
- default: route_based
- choices:
- - route_based
- - policy_based
- enable_bgp:
- description:
- - Whether BGP is enabled for this virtual network gateway or not.
- default: false
- sku:
- description:
- - The reference of the VirtualNetworkGatewaySku resource which represents the SKU selected for Virtual network gateway.
- default: VpnGw1
- choices:
- - VpnGw1
- - VpnGw2
- - VpnGw3
- bgp_settings:
- description:
- - Virtual network gateway's BGP speaker settings.
- suboptions:
- asn:
- description:
- - The BGP speaker's ASN.
- required: True
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Madhura Naniwadekar (@Madhura-CSI)
-'''
-
-EXAMPLES = '''
- - name: Create virtual network gateway without bgp settings
- azure_rm_virtualnetworkgateway:
- resource_group: myResourceGroup
- name: myVirtualNetworkGateway
- ip_configurations:
- - name: testipconfig
- private_ip_allocation_method: Dynamic
- public_ip_address_name: testipaddr
- virtual_network: myVirtualNetwork
- tags:
- common: "xyz"
-
- - name: Create virtual network gateway with bgp
- azure_rm_virtualnetworkgateway:
- resource_group: myResourceGroup
- name: myVirtualNetworkGateway
-      sku: VpnGw1
- ip_configurations:
- - name: testipconfig
- private_ip_allocation_method: Dynamic
- public_ip_address_name: testipaddr
- enable_bgp: yes
- virtual_network: myVirtualNetwork
- bgp_settings:
- asn: 65515
- bgp_peering_address: "169.254.54.209"
- tags:
- common: "xyz"
-
- - name: Delete instance of virtual network gateway
- azure_rm_virtualnetworkgateway:
- resource_group: myResourceGroup
- name: myVirtualNetworkGateway
- state: absent
-'''
-
-RETURN = '''
-id:
- description:
- - Virtual Network Gateway resource ID.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworkGateways/myV
- irtualNetworkGateway"
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, CIDR_PATTERN
-from ansible.module_utils.common.dict_transformations import _snake_to_camel
-
-
-AZURE_VPN_GATEWAY_OBJECT_CLASS = 'VirtualNetworkGateway'
-
-
-ip_configuration_spec = dict(
- name=dict(type='str', required=True),
- private_ip_allocation_method=dict(type='str', choices=['Dynamic', 'Static'], default='Dynamic'),
- subnet=dict(type='str'),
- public_ip_address_name=dict(type='str'),
-)
-
-
-sku_spec = dict(
- name=dict(type='str', default='VpnGw1'),
- tier=dict(type='str', default='VpnGw1')
-)
-
-
-bgp_spec = dict(
- asn=dict(type='int', required=True),
-)
-
-
-def vgw_to_dict(vgw):
- results = dict(
- id=vgw.id,
- name=vgw.name,
- location=vgw.location,
- gateway_type=vgw.gateway_type,
- vpn_type=vgw.vpn_type,
- enable_bgp=vgw.enable_bgp,
- tags=vgw.tags,
- provisioning_state=vgw.provisioning_state,
- sku=dict(
- name=vgw.sku.name,
- tier=vgw.sku.tier
- ),
- bgp_settings=dict(
- asn=vgw.bgp_settings.asn,
- bgp_peering_address=vgw.bgp_settings.bgp_peering_address,
- peer_weight=vgw.bgp_settings.peer_weight
- ) if vgw.bgp_settings else None,
- etag=vgw.etag
- )
- return results
-
-
-class AzureRMVirtualNetworkGateway(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- resource_group=dict(type='str', required=True),
- name=dict(type='str', required=True),
- state=dict(type='str', default='present', choices=['present', 'absent']),
- location=dict(type='str'),
- ip_configurations=dict(type='list', default=None, elements='dict', options=ip_configuration_spec),
- gateway_type=dict(type='str', default='vpn', choices=['vpn', 'express_route']),
- vpn_type=dict(type='str', default='route_based', choices=['route_based', 'policy_based']),
- enable_bgp=dict(type='bool', default=False),
- sku=dict(default='VpnGw1', choices=['VpnGw1', 'VpnGw2', 'VpnGw3']),
- bgp_settings=dict(type='dict', options=bgp_spec),
- virtual_network=dict(type='raw', aliases=['virtual_network_name'])
- )
-
- self.resource_group = None
- self.name = None
- self.state = None
- self.location = None
- self.ip_configurations = None
- self.gateway_type = None
- self.vpn_type = None
- self.enable_bgp = None
- self.sku = None
- self.bgp_settings = None
-
- self.results = dict(
- changed=False,
- state=dict()
- )
-
- super(AzureRMVirtualNetworkGateway, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True)
-
- def exec_module(self, **kwargs):
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- setattr(self, key, kwargs[key])
-
- changed = False
- results = dict()
- vgw = None
-
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- self.virtual_network = self.parse_resource_to_dict(self.virtual_network)
- resource_group = self.get_resource_group(self.resource_group)
-
- try:
- vgw = self.network_client.virtual_network_gateways.get(self.resource_group, self.name)
- if self.state == 'absent':
- self.log("CHANGED: vnet exists but requested state is 'absent'")
- changed = True
- except CloudError:
- if self.state == 'present':
- self.log("CHANGED: VPN Gateway {0} does not exist but requested state is 'present'".format(self.name))
- changed = True
-
- if vgw:
- results = vgw_to_dict(vgw)
- if self.state == 'present':
- update_tags, results['tags'] = self.update_tags(results['tags'])
- if update_tags:
- changed = True
- sku = dict(name=self.sku, tier=self.sku)
- if sku != results['sku']:
- changed = True
- if self.enable_bgp != results['enable_bgp']:
- changed = True
- if self.bgp_settings and self.bgp_settings['asn'] != results['bgp_settings']['asn']:
- changed = True
-
- self.results['changed'] = changed
- self.results['id'] = results.get('id')
-
- if self.check_mode:
- return self.results
- if changed:
- if self.state == 'present':
- if not self.sku:
- self.fail('Parameter error: sku is required when creating a vpn gateway')
- if not self.ip_configurations:
- self.fail('Parameter error: ip_configurations required when creating a vpn gateway')
- subnet = self.network_models.SubResource(
- id='/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/virtualNetworks/{2}/subnets/GatewaySubnet'.format(
- self.virtual_network['subscription_id'],
- self.virtual_network['resource_group'],
- self.virtual_network['name']))
-
- public_ip_address = self.network_models.SubResource(
- id='/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Network/publicIPAddresses/{2}'.format(
- self.virtual_network['subscription_id'],
- self.virtual_network['resource_group'],
- self.ip_configurations[0]['public_ip_address_name']))
-
- vgw_ip_configurations = [
- self.network_models.VirtualNetworkGatewayIPConfiguration(
- private_ip_allocation_method=ip_config.get('private_ip_allocation_method'),
- subnet=subnet,
- public_ip_address=public_ip_address,
- name='default'
- ) for ip_config in self.ip_configurations
- ]
-
- vgw_sku = self.network_models.VirtualNetworkGatewaySku(
- name=self.sku,
- tier=self.sku
- )
-
- vgw_bgp_settings = self.network_models.BgpSettings(
- asn=self.bgp_settings.get('asn'),
- ) if self.bgp_settings else None
- vgw = self.network_models.VirtualNetworkGateway(
- location=self.location,
- ip_configurations=vgw_ip_configurations,
- gateway_type=_snake_to_camel(self.gateway_type, True),
- vpn_type=_snake_to_camel(self.vpn_type, True),
- enable_bgp=self.enable_bgp,
- sku=vgw_sku,
- bgp_settings=vgw_bgp_settings
- )
- if self.tags:
- vgw.tags = self.tags
- results = self.create_or_update_vgw(vgw)
-
- else:
- results = self.delete_vgw()
-
- if self.state == 'present':
- self.results['id'] = results.get('id')
- return self.results
-
- def create_or_update_vgw(self, vgw):
- try:
- poller = self.network_client.virtual_network_gateways.create_or_update(self.resource_group, self.name, vgw)
- new_vgw = self.get_poller_result(poller)
- return vgw_to_dict(new_vgw)
- except Exception as exc:
- self.fail("Error creating or updating virtual network gateway {0} - {1}".format(self.name, str(exc)))
-
- def delete_vgw(self):
- try:
- poller = self.network_client.virtual_network_gateways.delete(self.resource_group, self.name)
- self.get_poller_result(poller)
- except Exception as exc:
- self.fail("Error deleting virtual network gateway {0} - {1}".format(self.name, str(exc)))
- return True
-
-
-def main():
- AzureRMVirtualNetworkGateway()
-
-
-if __name__ == '__main__':
- main()
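
A minimal sketch that spells out the sku, gateway_type and vpn_type choices exactly as documented above, reusing the placeholder names from the module's examples:

    - name: Create a route-based VPN gateway with an explicit SKU
      azure_rm_virtualnetworkgateway:
        resource_group: myResourceGroup
        name: myVirtualNetworkGateway
        sku: VpnGw2
        gateway_type: vpn
        vpn_type: route_based
        virtual_network: myVirtualNetwork
        ip_configurations:
          - name: testipconfig
            private_ip_allocation_method: Dynamic
            public_ip_address_name: testipaddr
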
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkpeering.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkpeering.py
deleted file mode 100644
index 6093039c9e..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkpeering.py
+++ /dev/null
@@ -1,414 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yunge Zhu <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_virtualnetworkpeering
-version_added: "2.8"
-short_description: Manage Azure Virtual Network Peering
-description:
- - Create, update and delete Azure Virtual Network Peering.
-
-options:
- resource_group:
- description:
- - Name of a resource group where the vnet exists.
- required: true
- name:
- description:
- - Name of the virtual network peering.
- required: true
- virtual_network:
- description:
- - Name or resource ID of the virtual network to be peered.
- required: true
- remote_virtual_network:
- description:
- - Remote virtual network to be peered.
- - It can be name of remote virtual network in same resource group.
- - It can be remote virtual network resource ID.
- - It can be a dict which contains I(name) and I(resource_group) of remote virtual network.
- - Required when creating.
- allow_virtual_network_access:
- description:
- - Allows VMs in the remote VNet to access all VMs in the local VNet.
- type: bool
- default: false
- allow_forwarded_traffic:
- description:
- - Allows forwarded traffic from the VMs in the remote VNet.
- type: bool
- default: false
- use_remote_gateways:
- description:
- - If remote gateways can be used on this virtual network.
- type: bool
- default: false
- allow_gateway_transit:
- description:
- - Allows VNet to use the remote VNet's gateway. Remote VNet gateway must have --allow-gateway-transit enabled for remote peering.
- - Only 1 peering can have this flag enabled. Cannot be set if the VNet already has a gateway.
- type: bool
- default: false
- state:
- description:
- - State of the virtual network peering. Use C(present) to create or update a peering and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yunge Zhu (@yungezz)
-'''
-
-EXAMPLES = '''
- - name: Create virtual network peering
- azure_rm_virtualnetworkpeering:
- resource_group: myResourceGroup
- virtual_network: myVirtualNetwork
- name: myPeering
- remote_virtual_network:
- resource_group: mySecondResourceGroup
- name: myRemoteVirtualNetwork
- allow_virtual_network_access: false
- allow_forwarded_traffic: true
-
- - name: Delete the virtual network peering
- azure_rm_virtualnetworkpeering:
- resource_group: myResourceGroup
- virtual_network: myVirtualNetwork
- name: myPeering
- state: absent
-'''
-RETURN = '''
-id:
- description:
- - ID of the Azure virtual network peering.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVirtualN
- etwork/virtualNetworkPeerings/myPeering"
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrestazure.tools import is_valid_resource_id
- from msrest.polling import LROPoller
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase, format_resource_id
-
-
-def virtual_network_to_dict(vnet):
- '''
- Convert a virtual network object to a dict.
- '''
- results = dict(
- id=vnet.id,
- name=vnet.name,
- location=vnet.location,
- type=vnet.type,
- tags=vnet.tags,
- provisioning_state=vnet.provisioning_state,
- etag=vnet.etag
- )
- if vnet.dhcp_options and len(vnet.dhcp_options.dns_servers) > 0:
- results['dns_servers'] = []
- for server in vnet.dhcp_options.dns_servers:
- results['dns_servers'].append(server)
- if vnet.address_space and len(vnet.address_space.address_prefixes) > 0:
- results['address_prefixes'] = []
- for space in vnet.address_space.address_prefixes:
- results['address_prefixes'].append(space)
- return results
-
-
-def vnetpeering_to_dict(vnetpeering):
- '''
- Convert a virtual network peering object to a dict.
- '''
- results = dict(
- id=vnetpeering.id,
- name=vnetpeering.name,
- remote_virtual_network=vnetpeering.remote_virtual_network.id,
- remote_address_space=dict(
- address_prefixes=vnetpeering.remote_address_space.address_prefixes
- ),
- peering_state=vnetpeering.peering_state,
- provisioning_state=vnetpeering.provisioning_state,
- use_remote_gateways=vnetpeering.use_remote_gateways,
- allow_gateway_transit=vnetpeering.allow_gateway_transit,
- allow_forwarded_traffic=vnetpeering.allow_forwarded_traffic,
- allow_virtual_network_access=vnetpeering.allow_virtual_network_access,
- etag=vnetpeering.etag
- )
- return results
-
-
-class AzureRMVirtualNetworkPeering(AzureRMModuleBase):
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- virtual_network=dict(
- type='raw'
- ),
- remote_virtual_network=dict(
- type='raw'
- ),
- allow_virtual_network_access=dict(
- type='bool',
- default=False
- ),
- allow_forwarded_traffic=dict(
- type='bool',
- default=False
- ),
- allow_gateway_transit=dict(
- type='bool',
- default=False
- ),
- use_remote_gateways=dict(
- type='bool',
- default=False
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- self.resource_group = None
- self.name = None
- self.virtual_network = None
- self.remote_virtual_network = None
- self.allow_virtual_network_access = None
- self.allow_forwarded_traffic = None
- self.allow_gateway_transit = None
- self.use_remote_gateways = None
-
- self.results = dict(changed=False)
-
- super(AzureRMVirtualNetworkPeering, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_check_mode=True,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()):
- setattr(self, key, kwargs[key])
-
- to_be_updated = False
-
- resource_group = self.get_resource_group(self.resource_group)
-
- # parse virtual_network
- self.virtual_network = self.parse_resource_to_dict(self.virtual_network)
- if self.virtual_network['resource_group'] != self.resource_group:
-            self.fail('Resource group of virtual_network is not the same as param resource_group')
-
- # parse remote virtual_network
- self.remote_virtual_network = self.format_vnet_id(self.remote_virtual_network)
-
- # get vnet peering
- response = self.get_vnet_peering()
-
- if self.state == 'present':
- if response:
- # check vnet id not changed
- existing_vnet = self.parse_resource_to_dict(response['id'])
- if existing_vnet['resource_group'] != self.virtual_network['resource_group'] or \
- existing_vnet['name'] != self.virtual_network['name']:
- self.fail("Cannot update virtual_network of Virtual Network Peering!")
-
- # check remote vnet id not changed
- if response['remote_virtual_network'].lower() != self.remote_virtual_network.lower():
- self.fail("Cannot update remote_virtual_network of Virtual Network Peering!")
-
- # check if update
- if response['peering_state'] == 'Disconnected':
- to_be_updated = True
- else:
- to_be_updated = self.check_update(response)
-
- else:
- # not exists, create new vnet peering
- to_be_updated = True
-
- # check if vnet exists
- virtual_network = self.get_vnet(self.virtual_network['resource_group'], self.virtual_network['name'])
- if not virtual_network:
- self.fail("Virtual network {0} in resource group {1} does not exist!".format(
- self.virtual_network['name'], self.virtual_network['resource_group']))
-
- elif self.state == 'absent':
- if response:
- self.log('Delete Azure Virtual Network Peering')
- self.results['changed'] = True
- self.results['id'] = response['id']
-
- if self.check_mode:
- return self.results
-
- response = self.delete_vnet_peering()
-
- else:
- self.fail("Azure Virtual Network Peering {0} does not exist in resource group {1}".format(self.name, self.resource_group))
-
- if to_be_updated:
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- response = self.create_or_update_vnet_peering()
- self.results['id'] = response['id']
-
- return self.results
-
- def format_vnet_id(self, vnet):
- if not vnet:
- return vnet
- if isinstance(vnet, dict) and vnet.get('name') and vnet.get('resource_group'):
- remote_vnet_id = format_resource_id(vnet['name'],
- self.subscription_id,
- 'Microsoft.Network',
- 'virtualNetworks',
- vnet['resource_group'])
- elif isinstance(vnet, str):
- if is_valid_resource_id(vnet):
- remote_vnet_id = vnet
- else:
- remote_vnet_id = format_resource_id(vnet,
- self.subscription_id,
- 'Microsoft.Network',
- 'virtualNetworks',
- self.resource_group)
- else:
- self.fail("remote_virtual_network could be a valid resource id, dict of name and resource_group, name of virtual network in same resource group.")
- return remote_vnet_id
-
-    def check_update(self, existing_vnet_peering):
-        if self.allow_forwarded_traffic != existing_vnet_peering['allow_forwarded_traffic']:
-            return True
-        if self.allow_gateway_transit != existing_vnet_peering['allow_gateway_transit']:
-            return True
-        if self.allow_virtual_network_access != existing_vnet_peering['allow_virtual_network_access']:
-            return True
-        if self.use_remote_gateways != existing_vnet_peering['use_remote_gateways']:
- return True
- return False
-
- def get_vnet(self, resource_group, vnet_name):
- '''
- Get Azure Virtual Network
- :return: deserialized Azure Virtual Network
- '''
- self.log("Get the Azure Virtual Network {0}".format(vnet_name))
- vnet = self.network_client.virtual_networks.get(resource_group, vnet_name)
-
- if vnet:
- results = virtual_network_to_dict(vnet)
- return results
- return False
-
- def create_or_update_vnet_peering(self):
- '''
- Creates or Update Azure Virtual Network Peering.
-
- :return: deserialized Azure Virtual Network Peering instance state dictionary
- '''
- self.log("Creating or Updating the Azure Virtual Network Peering {0}".format(self.name))
-
- vnet_id = format_resource_id(self.virtual_network['name'],
- self.subscription_id,
- 'Microsoft.Network',
- 'virtualNetworks',
- self.virtual_network['resource_group'])
- peering = self.network_models.VirtualNetworkPeering(
- id=vnet_id,
- name=self.name,
- remote_virtual_network=self.network_models.SubResource(id=self.remote_virtual_network),
- allow_virtual_network_access=self.allow_virtual_network_access,
- allow_gateway_transit=self.allow_gateway_transit,
- allow_forwarded_traffic=self.allow_forwarded_traffic,
- use_remote_gateways=self.use_remote_gateways)
-
- try:
- response = self.network_client.virtual_network_peerings.create_or_update(self.resource_group,
- self.virtual_network['name'],
- self.name,
- peering)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
- return vnetpeering_to_dict(response)
- except CloudError as exc:
- self.fail("Error creating Azure Virtual Network Peering: {0}.".format(exc.message))
-
- def delete_vnet_peering(self):
- '''
- Deletes the specified Azure Virtual Network Peering
-
- :return: True
- '''
- self.log("Deleting Azure Virtual Network Peering {0}".format(self.name))
- try:
- poller = self.network_client.virtual_network_peerings.delete(
- self.resource_group, self.virtual_network['name'], self.name)
- self.get_poller_result(poller)
- return True
- except CloudError as e:
- self.fail("Error deleting the Azure Virtual Network Peering: {0}".format(e.message))
- return False
-
- def get_vnet_peering(self):
- '''
- Gets the Virtual Network Peering.
-
- :return: deserialized Virtual Network Peering
- '''
- self.log(
- "Checking if Virtual Network Peering {0} is present".format(self.name))
- try:
- response = self.network_client.virtual_network_peerings.get(self.resource_group,
- self.virtual_network['name'],
- self.name)
- self.log("Response : {0}".format(response))
- return vnetpeering_to_dict(response)
- except CloudError:
- self.log('Did not find the Virtual Network Peering.')
- return False
-
-
-def main():
- """Main execution"""
- AzureRMVirtualNetworkPeering()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkpeering_info.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkpeering_info.py
deleted file mode 100644
index 4548894cda..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkpeering_info.py
+++ /dev/null
@@ -1,256 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2019 Yunge Zhu (@yungezz)
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = '''
----
-module: azure_rm_virtualnetworkpeering_info
-version_added: "2.9"
-short_description: Get facts of Azure Virtual Network Peering
-description:
- - Get facts of Azure Virtual Network Peering.
-
-options:
- resource_group:
- description:
- - Name of a resource group where the vnet exists.
- required: True
- virtual_network:
- description:
- - Name or resource ID of a virtual network.
- required: True
- name:
- description:
- - Name of the virtual network peering.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yunge Zhu (@yungezz)
-'''
-
-EXAMPLES = '''
- - name: Get virtual network peering by name
- azure_rm_virtualnetworkpeering_info:
- resource_group: myResourceGroup
- virtual_network: myVnet1
- name: myVnetPeer
-
-  - name: List virtual network peerings of a virtual network
-    azure_rm_virtualnetworkpeering_info:
- resource_group: myResourceGroup
- virtual_network: myVnet1
-'''
-
-RETURN = '''
-vnetpeerings:
- description:
- - A list of Virtual Network Peering facts.
- returned: always
- type: complex
- contains:
- id:
- description: ID of current Virtual Network peering.
- returned: always
- type: str
- sample:
- "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVnet/virtualNetworkPeerings/peer1"
- name:
- description:
- - Name of Virtual Network peering.
- returned: always
- type: str
- sample: myPeering
- remote_virtual_network:
- description:
- - ID of remote Virtual Network to be peered to.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVnet2
- remote_address_space:
- description:
- - The reference of the remote Virtual Network address space.
- type: complex
- returned: always
- contains:
- address_prefixes:
- description:
- - A list of address blocks reserved for this Virtual Network in CIDR notation.
- returned: always
- type: list
- sample: 10.1.0.0/16
- peering_state:
- description:
- - The state of the virtual network peering.
- returned: always
- type: str
- sample: Connected
- provisioning_state:
- description:
- - The provisioning state of the resource.
- returned: always
- type: str
- sample: Succeeded
- allow_forwarded_traffic:
- description:
- - Whether forwarded traffic from the VMs in the remote Virtual Network will be allowed/disallowed.
- returned: always
- type: bool
- sample: False
- allow_gateway_transit:
- description:
- - Whether gateway links can be used in remote Virtual Networking to link to this Virtual Network.
- returned: always
- type: bool
- sample: False
- allow_virtual_network_access:
- description:
- - Whether the VMs in the linked Virtual Network space can access all the VMs in local Virtual Network space.
- returned: always
- type: bool
- sample: False
- use_remote_gateways:
- description:
- - Whether remote gateways can be used on this Virtual Network.
- returned: always
- type: bool
- sample: False
-'''
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-
-def vnetpeering_to_dict(vnetpeering):
- '''
- Convert a virtual network peering object to a dict.
- '''
- results = dict(
- id=vnetpeering.id,
- name=vnetpeering.name,
- remote_virtual_network=vnetpeering.remote_virtual_network.id,
- remote_address_space=dict(
- address_prefixes=vnetpeering.remote_address_space.address_prefixes
- ),
- peering_state=vnetpeering.peering_state,
- provisioning_state=vnetpeering.provisioning_state,
- use_remote_gateways=vnetpeering.use_remote_gateways,
- allow_gateway_transit=vnetpeering.allow_gateway_transit,
- allow_forwarded_traffic=vnetpeering.allow_forwarded_traffic,
- allow_virtual_network_access=vnetpeering.allow_virtual_network_access
- )
- return results
-
-
-class AzureRMVirtualNetworkPeeringInfo(AzureRMModuleBase):
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str'
- ),
- virtual_network=dict(
- type='raw',
- required=True
- )
- )
-
- self.resource_group = None
- self.name = None
- self.virtual_network = None
-
- self.results = dict(changed=False)
-
- super(AzureRMVirtualNetworkPeeringInfo, self).__init__(derived_arg_spec=self.module_arg_spec,
- supports_tags=False)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
- is_old_facts = self.module._name == 'azure_rm_virtualnetworkpeering_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_virtualnetworkpeering_facts' module has been renamed to 'azure_rm_virtualnetworkpeering_info'", version='2.13')
-
- for key in list(self.module_arg_spec.keys()):
- setattr(self, key, kwargs[key])
-
- # parse virtual_network
- self.virtual_network = self.parse_resource_to_dict(self.virtual_network)
- if self.virtual_network['resource_group'] != self.resource_group:
-            self.fail('Resource group of virtual_network is not the same as parameter resource_group')
-
- self.results['vnetpeerings'] = []
- # get vnet peering
- if self.name:
- self.results['vnetpeerings'] = self.get_by_name()
- else:
- self.results['vnetpeerings'] = self.list_by_vnet()
-
- return self.results
-
- def get_by_name(self):
- '''
- Gets the Virtual Network Peering.
-
- :return: List of Virtual Network Peering
- '''
- self.log(
- "Get Virtual Network Peering {0}".format(self.name))
- results = []
- try:
- response = self.network_client.virtual_network_peerings.get(resource_group_name=self.resource_group,
- virtual_network_name=self.virtual_network['name'],
- virtual_network_peering_name=self.name)
- self.log("Response : {0}".format(response))
- results.append(vnetpeering_to_dict(response))
- except CloudError:
- self.log('Did not find the Virtual Network Peering.')
- return results
-
- def list_by_vnet(self):
- '''
- Lists the Virtual Network Peering in specific Virtual Network.
-
- :return: List of Virtual Network Peering
- '''
- self.log(
- "List Virtual Network Peering in Virtual Network {0}".format(self.virtual_network['name']))
- results = []
- try:
- response = self.network_client.virtual_network_peerings.list(resource_group_name=self.resource_group,
- virtual_network_name=self.virtual_network['name'])
- self.log("Response : {0}".format(response))
- if response:
- for p in response:
- results.append(vnetpeering_to_dict(p))
- except CloudError:
- self.log('Did not find the Virtual Network Peering.')
- return results
-
-
-def main():
- """Main execution"""
- AzureRMVirtualNetworkPeeringInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_webapp.py b/lib/ansible/modules/cloud/azure/azure_rm_webapp.py
deleted file mode 100644
index 4f185f4580..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_webapp.py
+++ /dev/null
@@ -1,1070 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_webapp
-version_added: "2.7"
-short_description: Manage Web App instances
-description:
- - Create, update and delete instance of Web App.
-
-options:
- resource_group:
- description:
- - Name of the resource group to which the resource belongs.
- required: True
- name:
- description:
- - Unique name of the app to create or update. To create or update a deployment slot, use the {slot} parameter.
- required: True
-
- location:
- description:
- - Resource location. If not set, location from the resource group will be used as default.
-
- plan:
- description:
- - App service plan. Required for creation.
- - Can be name of existing app service plan in same resource group as web app.
- - Can be the resource ID of an existing app service plan. For example
- /subscriptions/<subs_id>/resourceGroups/<resource_group>/providers/Microsoft.Web/serverFarms/<plan_name>.
- - Can be a dict containing five parameters, defined below.
- - C(name), name of app service plan.
- - C(resource_group), resource group of the app service plan.
- - C(sku), SKU of app service plan, allowed values listed on U(https://azure.microsoft.com/en-us/pricing/details/app-service/linux/).
-            - C(is_linux), whether or not the app service plan is Linux. Defaults to C(False).
- - C(number_of_workers), number of workers for app service plan.
-
- frameworks:
- description:
- - Set of run time framework settings. Each setting is a dictionary.
- - See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
- suboptions:
- name:
- description:
- - Name of the framework.
- - Supported framework list for Windows web app and Linux web app is different.
- - Windows web apps support C(java), C(net_framework), C(php), C(python), and C(node) from June 2018.
-                    - Windows web apps support multiple frameworks at the same time.
- - Linux web apps support C(java), C(ruby), C(php), C(dotnetcore), and C(node) from June 2018.
- - Linux web apps support only one framework.
- - Java framework is mutually exclusive with others.
- choices:
- - java
- - net_framework
- - php
- - python
- - ruby
- - dotnetcore
- - node
- version:
- description:
-                    - Version of the framework. For supported values on Linux web apps, see U(https://aka.ms/linux-stacks) for more info.
- - C(net_framework) supported value sample, C(v4.0) for .NET 4.6 and C(v3.0) for .NET 3.5.
- - C(php) supported value sample, C(5.5), C(5.6), C(7.0).
- - C(python) supported value sample, C(5.5), C(5.6), C(7.0).
- - C(node) supported value sample, C(6.6), C(6.9).
- - C(dotnetcore) supported value sample, C(1.0), C(1.1), C(1.2).
- - C(ruby) supported value sample, C(2.3).
- - C(java) supported value sample, C(1.9) for Windows web app. C(1.8) for Linux web app.
- settings:
- description:
- - List of settings of the framework.
- suboptions:
- java_container:
- description:
- - Name of Java container.
- - Supported only when I(frameworks=java). Sample values C(Tomcat), C(Jetty).
- java_container_version:
- description:
- - Version of Java container.
- - Supported only when I(frameworks=java).
-                            - Sample values for C(Tomcat), C(8.0), C(8.5), C(9.0). For C(Jetty), C(9.1), C(9.3).
-
- container_settings:
- description:
- - Web app container settings.
- suboptions:
- name:
- description:
- - Name of container, for example C(imagename:tag).
- registry_server_url:
- description:
- - Container registry server URL, for example C(mydockerregistry.io).
- registry_server_user:
- description:
- - The container registry server user name.
- registry_server_password:
- description:
- - The container registry server password.
-
- scm_type:
- description:
- - Repository type of deployment source, for example C(LocalGit), C(GitHub).
- - List of supported values maintained at U(https://docs.microsoft.com/en-us/rest/api/appservice/webapps/createorupdate#scmtype).
-
- deployment_source:
- description:
- - Deployment source for git.
- suboptions:
- url:
- description:
- - Repository url of deployment source.
-
- branch:
- description:
- - The branch name of the repository.
- startup_file:
- description:
- - The web's startup file.
- - Used only for Linux web apps.
-
- client_affinity_enabled:
- description:
- - Whether or not to send session affinity cookies, which route client requests in the same session to the same instance.
- type: bool
- default: True
-
- https_only:
- description:
- - Configures web site to accept only https requests.
- type: bool
-
- dns_registration:
- description:
- - Whether or not the web app hostname is registered with DNS on creation. Set to C(false) to register.
- type: bool
-
- skip_custom_domain_verification:
- description:
- - Whether or not to skip verification of custom (non *.azurewebsites.net) domains associated with web app. Set to C(true) to skip.
- type: bool
-
- ttl_in_seconds:
- description:
- - Time to live in seconds for web app default domain name.
-
- app_settings:
- description:
- - Configure web app application settings. Suboptions are in key value pair format.
-
- purge_app_settings:
- description:
- - Purge any existing application settings. Replace web app application settings with app_settings.
- type: bool
-
- app_state:
- description:
- - Start/Stop/Restart the web app.
- type: str
- choices:
- - started
- - stopped
- - restarted
- default: started
-
- state:
- description:
- - State of the Web App.
- - Use C(present) to create or update a Web App and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yunge Zhu (@yungezz)
-
-'''
-
-EXAMPLES = '''
-  - name: Create a windows web app with a non-existent app service plan
- azure_rm_webapp:
- resource_group: myResourceGroup
- name: myWinWebapp
- plan:
- resource_group: myAppServicePlan_rg
- name: myAppServicePlan
- is_linux: false
- sku: S1
-
-  - name: Create a docker web app with some app settings, using a docker image
- azure_rm_webapp:
- resource_group: myResourceGroup
- name: myDockerWebapp
- plan:
- resource_group: myAppServicePlan_rg
- name: myAppServicePlan
- is_linux: true
- sku: S1
- number_of_workers: 2
- app_settings:
- testkey: testvalue
- testkey2: testvalue2
- container_settings:
- name: ansible/ansible:ubuntu1404
-
-  - name: Create a docker web app with a private ACR registry
- azure_rm_webapp:
- resource_group: myResourceGroup
- name: myDockerWebapp
- plan: myAppServicePlan
- app_settings:
- testkey: testvalue
- container_settings:
- name: ansible/ubuntu1404
- registry_server_url: myregistry.io
- registry_server_user: user
- registry_server_password: pass
-
- - name: Create a linux web app with Node 6.6 framework
- azure_rm_webapp:
- resource_group: myResourceGroup
- name: myLinuxWebapp
- plan:
- resource_group: myAppServicePlan_rg
- name: myAppServicePlan
- app_settings:
- testkey: testvalue
- frameworks:
- - name: "node"
- version: "6.6"
-
- - name: Create a windows web app with node, php
- azure_rm_webapp:
- resource_group: myResourceGroup
- name: myWinWebapp
- plan:
- resource_group: myAppServicePlan_rg
- name: myAppServicePlan
- app_settings:
- testkey: testvalue
- frameworks:
- - name: "node"
- version: 6.6
- - name: "php"
- version: "7.0"
-
- - name: Create a stage deployment slot for an existing web app
- azure_rm_webapp:
- resource_group: myResourceGroup
- name: myWebapp/slots/stage
- plan:
- resource_group: myAppServicePlan_rg
- name: myAppServicePlan
- app_settings:
-        testkey: testvalue
-
- - name: Create a linux web app with java framework
- azure_rm_webapp:
- resource_group: myResourceGroup
- name: myLinuxWebapp
- plan:
- resource_group: myAppServicePlan_rg
- name: myAppServicePlan
- app_settings:
- testkey: testvalue
- frameworks:
- - name: "java"
- version: "8"
- settings:
- java_container: "Tomcat"
- java_container_version: "8.5"
-'''
-
-RETURN = '''
-azure_webapp:
- description:
- - ID of current web app.
- returned: always
- type: str
- sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp"
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrest.serialization import Model
- from azure.mgmt.web.models import (
- site_config, app_service_plan, Site,
- AppServicePlan, SkuDescription, NameValuePair
- )
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-container_settings_spec = dict(
- name=dict(type='str', required=True),
- registry_server_url=dict(type='str'),
- registry_server_user=dict(type='str'),
- registry_server_password=dict(type='str', no_log=True)
-)
-
-deployment_source_spec = dict(
- url=dict(type='str'),
- branch=dict(type='str')
-)
-
-
-framework_settings_spec = dict(
- java_container=dict(type='str', required=True),
- java_container_version=dict(type='str', required=True)
-)
-
-
-framework_spec = dict(
- name=dict(
- type='str',
- required=True,
- choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']),
- version=dict(type='str', required=True),
- settings=dict(type='dict', options=framework_settings_spec)
-)
-
-
-def _normalize_sku(sku):
- if sku is None:
- return sku
-
- sku = sku.upper()
- if sku == 'FREE':
- return 'F1'
- elif sku == 'SHARED':
- return 'D1'
- return sku
-
-
-def get_sku_name(tier):
- tier = tier.upper()
- if tier == 'F1' or tier == "FREE":
- return 'FREE'
- elif tier == 'D1' or tier == "SHARED":
- return 'SHARED'
- elif tier in ['B1', 'B2', 'B3', 'BASIC']:
- return 'BASIC'
- elif tier in ['S1', 'S2', 'S3']:
- return 'STANDARD'
- elif tier in ['P1', 'P2', 'P3']:
- return 'PREMIUM'
- elif tier in ['P1V2', 'P2V2', 'P3V2']:
- return 'PREMIUMV2'
- else:
- return None
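-
-# Illustrative note, not part of the original module: as a quick sanity check of the two
-# helpers above, _normalize_sku('Free') returns 'F1', get_sku_name('S1') returns
-# 'STANDARD', and an unrecognized tier makes get_sku_name return None.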
-
-
-def appserviceplan_to_dict(plan):
- return dict(
- id=plan.id,
- name=plan.name,
- kind=plan.kind,
- location=plan.location,
- reserved=plan.reserved,
- is_linux=plan.reserved,
- provisioning_state=plan.provisioning_state,
- tags=plan.tags if plan.tags else None
- )
-
-
-def webapp_to_dict(webapp):
- return dict(
- id=webapp.id,
- name=webapp.name,
- location=webapp.location,
- client_cert_enabled=webapp.client_cert_enabled,
- enabled=webapp.enabled,
- reserved=webapp.reserved,
- client_affinity_enabled=webapp.client_affinity_enabled,
- server_farm_id=webapp.server_farm_id,
- host_names_disabled=webapp.host_names_disabled,
- https_only=webapp.https_only if hasattr(webapp, 'https_only') else None,
- skip_custom_domain_verification=webapp.skip_custom_domain_verification if hasattr(webapp, 'skip_custom_domain_verification') else None,
- ttl_in_seconds=webapp.ttl_in_seconds if hasattr(webapp, 'ttl_in_seconds') else None,
- state=webapp.state,
- tags=webapp.tags if webapp.tags else None
- )
-
-
-class Actions:
- CreateOrUpdate, UpdateAppSettings, Delete = range(3)
-
-
-class AzureRMWebApps(AzureRMModuleBase):
- """Configuration class for an Azure RM Web App resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- plan=dict(
- type='raw'
- ),
- frameworks=dict(
- type='list',
- elements='dict',
- options=framework_spec
- ),
- container_settings=dict(
- type='dict',
- options=container_settings_spec
- ),
- scm_type=dict(
- type='str',
- ),
- deployment_source=dict(
- type='dict',
- options=deployment_source_spec
- ),
- startup_file=dict(
- type='str'
- ),
- client_affinity_enabled=dict(
- type='bool',
- default=True
- ),
- dns_registration=dict(
- type='bool'
- ),
- https_only=dict(
- type='bool'
- ),
- skip_custom_domain_verification=dict(
- type='bool'
- ),
- ttl_in_seconds=dict(
- type='int'
- ),
- app_settings=dict(
- type='dict'
- ),
- purge_app_settings=dict(
- type='bool',
- default=False
- ),
- app_state=dict(
- type='str',
- choices=['started', 'stopped', 'restarted'],
- default='started'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- mutually_exclusive = [['container_settings', 'frameworks']]
-
- self.resource_group = None
- self.name = None
- self.location = None
-
- # update in create_or_update as parameters
- self.client_affinity_enabled = True
- self.dns_registration = None
- self.skip_custom_domain_verification = None
- self.ttl_in_seconds = None
- self.https_only = None
-
- self.tags = None
-
- # site config, e.g app settings, ssl
- self.site_config = dict()
- self.app_settings = dict()
- self.app_settings_strDic = None
-
- # app service plan
- self.plan = None
-
- # siteSourceControl
- self.deployment_source = dict()
-
-        # site: top-level resource used for creation or update, e.g. windows/linux, client_affinity and other first-level args
- self.site = None
-
- # property for internal usage, not used for sdk
- self.container_settings = None
-
- self.purge_app_settings = False
- self.app_state = 'started'
-
- self.results = dict(
- changed=False,
- id=None,
- )
- self.state = None
- self.to_do = []
-
- self.frameworks = None
-
- # set site_config value from kwargs
- self.site_config_updatable_properties = ["net_framework_version",
- "java_version",
- "php_version",
- "python_version",
- "scm_type"]
-
- # updatable_properties
- self.updatable_properties = ["client_affinity_enabled",
- "force_dns_registration",
- "https_only",
- "skip_custom_domain_verification",
- "ttl_in_seconds"]
-
- self.supported_linux_frameworks = ['ruby', 'php', 'dotnetcore', 'node', 'java']
- self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java']
-
- super(AzureRMWebApps, self).__init__(derived_arg_spec=self.module_arg_spec,
- mutually_exclusive=mutually_exclusive,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- if key == "scm_type":
- self.site_config[key] = kwargs[key]
-
- old_response = None
- response = None
- to_be_updated = False
-
- # set location
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- # get existing web app
- old_response = self.get_webapp()
-
- if old_response:
- self.results['id'] = old_response['id']
-
- if self.state == 'present':
- if not self.plan and not old_response:
- self.fail("Please specify plan for newly created web app.")
-
- if not self.plan:
- self.plan = old_response['server_farm_id']
-
- self.plan = self.parse_resource_to_dict(self.plan)
-
- # get app service plan
- is_linux = False
- old_plan = self.get_app_service_plan()
- if old_plan:
- is_linux = old_plan['reserved']
- else:
- is_linux = self.plan['is_linux'] if 'is_linux' in self.plan else False
-
- if self.frameworks:
- # java is mutually exclusive with other frameworks
- if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks):
- self.fail('Java is mutually exclusive with other frameworks.')
-
- if is_linux:
- if len(self.frameworks) != 1:
-                        self.fail('Only one framework can be specified for a Linux web app.')
-
- if self.frameworks[0]['name'] not in self.supported_linux_frameworks:
- self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name']))
-
- self.site_config['linux_fx_version'] = (self.frameworks[0]['name'] + '|' + self.frameworks[0]['version']).upper()
-
- if self.frameworks[0]['name'] == 'java':
- if self.frameworks[0]['version'] != '8':
- self.fail("Linux web app only supports java 8.")
- if self.frameworks[0]['settings'] and self.frameworks[0]['settings']['java_container'].lower() != 'tomcat':
- self.fail("Linux web app only supports tomcat container.")
-
- if self.frameworks[0]['settings'] and self.frameworks[0]['settings']['java_container'].lower() == 'tomcat':
- self.site_config['linux_fx_version'] = 'TOMCAT|' + self.frameworks[0]['settings']['java_container_version'] + '-jre8'
- else:
- self.site_config['linux_fx_version'] = 'JAVA|8-jre8'
- else:
- for fx in self.frameworks:
- if fx.get('name') not in self.supported_windows_frameworks:
- self.fail('Unsupported framework {0} for Windows web app.'.format(fx.get('name')))
- else:
- self.site_config[fx.get('name') + '_version'] = fx.get('version')
-
- if 'settings' in fx and fx['settings'] is not None:
- for key, value in fx['settings'].items():
- self.site_config[key] = value
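-
-            # Illustrative note, not part of the original module: with the example
-            # playbooks in EXAMPLES above, a Linux app with the single framework
-            # {name: node, version: "6.6"} ends up with
-            # site_config['linux_fx_version'] == 'NODE|6.6', while a Windows app with
-            # node and php sets site_config['node_version'] = '6.6' and
-            # site_config['php_version'] = '7.0'.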
-
- if not self.app_settings:
- self.app_settings = dict()
-
- if self.container_settings:
- linux_fx_version = 'DOCKER|'
-
- if self.container_settings.get('registry_server_url'):
- self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url']
-
- linux_fx_version += self.container_settings['registry_server_url'] + '/'
-
- linux_fx_version += self.container_settings['name']
-
- self.site_config['linux_fx_version'] = linux_fx_version
-
- if self.container_settings.get('registry_server_user'):
- self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings['registry_server_user']
-
- if self.container_settings.get('registry_server_password'):
- self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings['registry_server_password']
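-
-            # Illustrative note, not part of the original module: for the private ACR
-            # registry example in EXAMPLES above, this block roughly yields
-            # site_config['linux_fx_version'] == 'DOCKER|myregistry.io/ansible/ubuntu1404'
-            # and app_settings['DOCKER_REGISTRY_SERVER_URL'] == 'https://myregistry.io'.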
-
- # init site
- self.site = Site(location=self.location, site_config=self.site_config)
-
- if self.https_only is not None:
- self.site.https_only = self.https_only
-
- if self.client_affinity_enabled:
- self.site.client_affinity_enabled = self.client_affinity_enabled
-
- # check if the web app already present in the resource group
- if not old_response:
- self.log("Web App instance doesn't exist")
-
- to_be_updated = True
- self.to_do.append(Actions.CreateOrUpdate)
- self.site.tags = self.tags
-
- # service plan is required for creation
- if not self.plan:
- self.fail("Please specify app service plan in plan parameter.")
-
- if not old_plan:
- # no existing service plan, create one
- if (not self.plan.get('name') or not self.plan.get('sku')):
- self.fail('Please specify name, is_linux, sku in plan')
-
- if 'location' not in self.plan:
- plan_resource_group = self.get_resource_group(self.plan['resource_group'])
- self.plan['location'] = plan_resource_group.location
-
- old_plan = self.create_app_service_plan()
-
- self.site.server_farm_id = old_plan['id']
-
- # if linux, setup startup_file
- if old_plan['is_linux']:
- if hasattr(self, 'startup_file'):
- self.site_config['app_command_line'] = self.startup_file
-
- # set app setting
- if self.app_settings:
- app_settings = []
- for key in self.app_settings.keys():
- app_settings.append(NameValuePair(name=key, value=self.app_settings[key]))
-
- self.site_config['app_settings'] = app_settings
- else:
- # existing web app, do update
- self.log("Web App instance already exists")
-
- self.log('Result: {0}'.format(old_response))
-
- update_tags, self.site.tags = self.update_tags(old_response.get('tags', None))
-
- if update_tags:
- to_be_updated = True
-
- # check if root level property changed
- if self.is_updatable_property_changed(old_response):
- to_be_updated = True
- self.to_do.append(Actions.CreateOrUpdate)
-
- # check if site_config changed
- old_config = self.get_webapp_configuration()
-
- if self.is_site_config_changed(old_config):
- to_be_updated = True
- self.to_do.append(Actions.CreateOrUpdate)
-
- # check if linux_fx_version changed
- if old_config.linux_fx_version != self.site_config.get('linux_fx_version', ''):
- to_be_updated = True
- self.to_do.append(Actions.CreateOrUpdate)
-
- self.app_settings_strDic = self.list_app_settings()
-
- # purge existing app_settings:
- if self.purge_app_settings:
- to_be_updated = True
- self.app_settings_strDic = dict()
- self.to_do.append(Actions.UpdateAppSettings)
-
- # check if app settings changed
- if self.purge_app_settings or self.is_app_settings_changed():
- to_be_updated = True
- self.to_do.append(Actions.UpdateAppSettings)
-
- if self.app_settings:
- for key in self.app_settings.keys():
- self.app_settings_strDic[key] = self.app_settings[key]
-
- elif self.state == 'absent':
- if old_response:
- self.log("Delete Web App instance")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_webapp()
-
- self.log('Web App instance deleted')
-
- else:
- self.fail("Web app {0} not exists.".format(self.name))
-
- if to_be_updated:
- self.log('Need to Create/Update web app')
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- if Actions.CreateOrUpdate in self.to_do:
- response = self.create_update_webapp()
-
- self.results['id'] = response['id']
-
- if Actions.UpdateAppSettings in self.to_do:
- update_response = self.update_app_settings()
- self.results['id'] = update_response.id
-
- webapp = None
- if old_response:
- webapp = old_response
- if response:
- webapp = response
-
- if webapp:
- if (webapp['state'] != 'Stopped' and self.app_state == 'stopped') or \
- (webapp['state'] != 'Running' and self.app_state == 'started') or \
- self.app_state == 'restarted':
-
- self.results['changed'] = True
- if self.check_mode:
- return self.results
-
- self.set_webapp_state(self.app_state)
-
- return self.results
-
-    # compare existing web app with input, determine whether it's an update operation
- def is_updatable_property_changed(self, existing_webapp):
- for property_name in self.updatable_properties:
- if hasattr(self, property_name) and getattr(self, property_name) is not None and \
- getattr(self, property_name) != existing_webapp.get(property_name, None):
- return True
-
- return False
-
- # compare xxx_version
- def is_site_config_changed(self, existing_config):
- for fx_version in self.site_config_updatable_properties:
- if self.site_config.get(fx_version):
- if not getattr(existing_config, fx_version) or \
- getattr(existing_config, fx_version).upper() != self.site_config.get(fx_version).upper():
- return True
-
- return False
-
-    # comparing existing app settings with input, determine whether they have changed
- def is_app_settings_changed(self):
- if self.app_settings:
- if self.app_settings_strDic:
- for key in self.app_settings.keys():
- if self.app_settings[key] != self.app_settings_strDic.get(key, None):
- return True
- else:
- return True
- return False
-
-    # comparing deployment source with input, determine whether it's changed
- def is_deployment_source_changed(self, existing_webapp):
- if self.deployment_source:
- if self.deployment_source.get('url') \
- and self.deployment_source['url'] != existing_webapp.get('site_source_control')['url']:
- return True
-
- if self.deployment_source.get('branch') \
- and self.deployment_source['branch'] != existing_webapp.get('site_source_control')['branch']:
- return True
-
- return False
-
- def create_update_webapp(self):
- '''
- Creates or updates Web App with the specified configuration.
-
- :return: deserialized Web App instance state dictionary
- '''
- self.log(
- "Creating / Updating the Web App instance {0}".format(self.name))
-
- try:
- skip_dns_registration = self.dns_registration
- force_dns_registration = None if self.dns_registration is None else not self.dns_registration
-
- response = self.web_client.web_apps.create_or_update(resource_group_name=self.resource_group,
- name=self.name,
- site_envelope=self.site,
- skip_dns_registration=skip_dns_registration,
- skip_custom_domain_verification=self.skip_custom_domain_verification,
- force_dns_registration=force_dns_registration,
- ttl_in_seconds=self.ttl_in_seconds)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the Web App instance.')
- self.fail(
- "Error creating the Web App instance: {0}".format(str(exc)))
- return webapp_to_dict(response)
-
- def delete_webapp(self):
- '''
- Deletes specified Web App instance in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the Web App instance {0}".format(self.name))
- try:
- response = self.web_client.web_apps.delete(resource_group_name=self.resource_group,
- name=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the Web App instance.')
- self.fail(
- "Error deleting the Web App instance: {0}".format(str(e)))
-
- return True
-
- def get_webapp(self):
- '''
- Gets the properties of the specified Web App.
-
- :return: deserialized Web App instance state dictionary
- '''
- self.log(
- "Checking if the Web App instance {0} is present".format(self.name))
-
- response = None
-
- try:
- response = self.web_client.web_apps.get(resource_group_name=self.resource_group,
- name=self.name)
-
- # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
- if response is not None:
- self.log("Response : {0}".format(response))
- self.log("Web App instance : {0} found".format(response.name))
- return webapp_to_dict(response)
-
- except CloudError as ex:
- pass
-
- self.log("Didn't find web app {0} in resource group {1}".format(
- self.name, self.resource_group))
-
- return False
-
- def get_app_service_plan(self):
- '''
- Gets app service plan
- :return: deserialized app service plan dictionary
- '''
- self.log("Get App Service Plan {0}".format(self.plan['name']))
-
- try:
- response = self.web_client.app_service_plans.get(
- resource_group_name=self.plan['resource_group'],
- name=self.plan['name'])
-
- # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
- if response is not None:
- self.log("Response : {0}".format(response))
- self.log("App Service Plan : {0} found".format(response.name))
-
- return appserviceplan_to_dict(response)
- except CloudError as ex:
- pass
-
- self.log("Didn't find app service plan {0} in resource group {1}".format(
- self.plan['name'], self.plan['resource_group']))
-
- return False
-
- def create_app_service_plan(self):
- '''
- Creates app service plan
- :return: deserialized app service plan dictionary
- '''
- self.log("Create App Service Plan {0}".format(self.plan['name']))
-
- try:
- # normalize sku
- sku = _normalize_sku(self.plan['sku'])
-
- sku_def = SkuDescription(tier=get_sku_name(
- sku), name=sku, capacity=(self.plan.get('number_of_workers', None)))
- plan_def = AppServicePlan(
- location=self.plan['location'], app_service_plan_name=self.plan['name'], sku=sku_def, reserved=(self.plan.get('is_linux', None)))
-
- poller = self.web_client.app_service_plans.create_or_update(
- self.plan['resource_group'], self.plan['name'], plan_def)
-
- if isinstance(poller, LROPoller):
- response = self.get_poller_result(poller)
-
- self.log("Response : {0}".format(response))
-
- return appserviceplan_to_dict(response)
- except CloudError as ex:
- self.fail("Failed to create app service plan {0} in resource group {1}: {2}".format(
- self.plan['name'], self.plan['resource_group'], str(ex)))
-
- def list_app_settings(self):
- '''
- List application settings
- :return: deserialized list response
- '''
- self.log("List application setting")
-
- try:
-
- response = self.web_client.web_apps.list_application_settings(
- resource_group_name=self.resource_group, name=self.name)
- self.log("Response : {0}".format(response))
-
- return response.properties
- except CloudError as ex:
- self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format(
- self.name, self.resource_group, str(ex)))
-
- def update_app_settings(self):
- '''
- Update application settings
- :return: deserialized updating response
- '''
- self.log("Update application setting")
-
- try:
- response = self.web_client.web_apps.update_application_settings(
- resource_group_name=self.resource_group, name=self.name, properties=self.app_settings_strDic)
- self.log("Response : {0}".format(response))
-
- return response
- except CloudError as ex:
- self.fail("Failed to update application settings for web app {0} in resource group {1}: {2}".format(
- self.name, self.resource_group, str(ex)))
-
- def create_or_update_source_control(self):
- '''
- Update site source control
- :return: deserialized updating response
- '''
- self.log("Update site source control")
-
- if self.deployment_source is None:
- return False
-
- self.deployment_source['is_manual_integration'] = False
- self.deployment_source['is_mercurial'] = False
-
- try:
-            response = self.web_client.web_apps.create_or_update_source_control(
- self.resource_group, self.name, self.deployment_source)
- self.log("Response : {0}".format(response))
-
- return response.as_dict()
- except CloudError as ex:
- self.fail("Failed to update site source control for web app {0} in resource group {1}".format(
- self.name, self.resource_group))
-
- def get_webapp_configuration(self):
- '''
- Get web app configuration
- :return: deserialized web app configuration response
- '''
- self.log("Get web app configuration")
-
- try:
-
- response = self.web_client.web_apps.get_configuration(
- resource_group_name=self.resource_group, name=self.name)
- self.log("Response : {0}".format(response))
-
- return response
- except CloudError as ex:
- self.log("Failed to get configuration for web app {0} in resource group {1}: {2}".format(
- self.name, self.resource_group, str(ex)))
-
- return False
-
- def set_webapp_state(self, appstate):
- '''
- Start/stop/restart web app
- :return: deserialized updating response
- '''
- try:
- if appstate == 'started':
- response = self.web_client.web_apps.start(resource_group_name=self.resource_group, name=self.name)
- elif appstate == 'stopped':
- response = self.web_client.web_apps.stop(resource_group_name=self.resource_group, name=self.name)
- elif appstate == 'restarted':
- response = self.web_client.web_apps.restart(resource_group_name=self.resource_group, name=self.name)
- else:
- self.fail("Invalid web app state {0}".format(appstate))
-
- self.log("Response : {0}".format(response))
-
- return response
- except CloudError as ex:
- request_id = ex.request_id if ex.request_id else ''
- self.log("Failed to {0} web app {1} in resource group {2}, request_id {3} - {4}".format(
- appstate, self.name, self.resource_group, request_id, str(ex)))
-
-
-def main():
- """Main execution"""
- AzureRMWebApps()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_webapp_info.py b/lib/ansible/modules/cloud/azure/azure_rm_webapp_info.py
deleted file mode 100644
index 4a3b4cd484..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_webapp_info.py
+++ /dev/null
@@ -1,488 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_webapp_info
-
-version_added: "2.9"
-
-short_description: Get Azure web app facts
-
-description:
-    - Get facts for a specific web app, all web apps in a resource group, or all web apps in the current subscription.
-
-options:
- name:
- description:
- - Only show results for a specific web app.
- resource_group:
- description:
- - Limit results by resource group.
- return_publish_profile:
- description:
-            - Indicate whether to return the publishing profile of the web app.
- default: False
- type: bool
- tags:
- description:
- - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
-
-extends_documentation_fragment:
- - azure
-
-author:
- - Yunge Zhu (@yungezz)
-'''
-
-EXAMPLES = '''
- - name: Get facts for web app by name
- azure_rm_webapp_info:
- resource_group: myResourceGroup
- name: winwebapp1
-
- - name: Get facts for web apps in resource group
- azure_rm_webapp_info:
- resource_group: myResourceGroup
-
- - name: Get facts for web apps with tags
- azure_rm_webapp_info:
- tags:
- - testtag
- - foo:bar
-'''
-
-RETURN = '''
-webapps:
- description:
- - List of web apps.
- returned: always
- type: complex
- contains:
- id:
- description:
- - ID of the web app.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/myWebApp
- name:
- description:
- - Name of the web app.
- returned: always
- type: str
- sample: winwebapp1
- resource_group:
- description:
- - Resource group of the web app.
- returned: always
- type: str
- sample: myResourceGroup
- location:
- description:
- - Location of the web app.
- returned: always
- type: str
- sample: eastus
- plan:
- description:
- - ID of app service plan used by the web app.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/serverfarms/myAppServicePlan
- app_settings:
- description:
- - App settings of the application. Only returned when web app has app settings.
- returned: always
- type: dict
- sample: {
- "testkey": "testvalue",
- "testkey2": "testvalue2"
- }
- frameworks:
- description:
- - Frameworks of the application. Only returned when web app has frameworks.
- returned: always
- type: list
- sample: [
- {
- "name": "net_framework",
- "version": "v4.0"
- },
- {
- "name": "java",
- "settings": {
- "java_container": "tomcat",
- "java_container_version": "8.5"
- },
- "version": "1.7"
- },
- {
- "name": "php",
- "version": "5.6"
- }
- ]
- availability_state:
- description:
- - Availability of this web app.
- returned: always
- type: str
- sample: Normal
- default_host_name:
- description:
- - Host name of the web app.
- returned: always
- type: str
- sample: vxxisurg397winapp4.azurewebsites.net
- enabled:
- description:
-                - Indicates whether the web app is enabled.
- returned: always
- type: bool
- sample: true
- enabled_host_names:
- description:
- - Enabled host names of the web app.
- returned: always
- type: list
- sample: [
- "vxxisurg397winapp4.azurewebsites.net",
- "vxxisurg397winapp4.scm.azurewebsites.net"
- ]
- host_name_ssl_states:
- description:
- - SSL state per host names of the web app.
- returned: always
- type: list
- sample: [
- {
- "hostType": "Standard",
- "name": "vxxisurg397winapp4.azurewebsites.net",
- "sslState": "Disabled"
- },
- {
- "hostType": "Repository",
- "name": "vxxisurg397winapp4.scm.azurewebsites.net",
- "sslState": "Disabled"
- }
- ]
- host_names:
- description:
- - Host names of the web app.
- returned: always
- type: list
- sample: [
- "vxxisurg397winapp4.azurewebsites.net"
- ]
- outbound_ip_addresses:
- description:
- - Outbound IP address of the web app.
- returned: always
- type: str
- sample: "40.71.11.131,40.85.166.200,168.62.166.67,137.135.126.248,137.135.121.45"
- ftp_publish_url:
- description:
- - Publishing URL of the web app when deployment type is FTP.
- returned: always
- type: str
- sample: ftp://xxxx.ftp.azurewebsites.windows.net
- state:
- description:
- - State of the web app.
- returned: always
- type: str
- sample: running
- publishing_username:
- description:
- - Publishing profile user name.
- returned: only when I(return_publish_profile=True).
- type: str
- sample: "$vxxisuRG397winapp4"
- publishing_password:
- description:
- - Publishing profile password.
- returned: only when I(return_publish_profile=True).
- type: str
- sample: "uvANsPQpGjWJmrFfm4Ssd5rpBSqGhjMk11pMSgW2vCsQtNx9tcgZ0xN26s9A"
- tags:
- description:
- - Tags assigned to the resource. Dictionary of string:string pairs.
- returned: always
- type: dict
- sample: { tag1: abc }
-'''
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from azure.common import AzureMissingResourceHttpError, AzureHttpError
-except Exception:
- # This is handled in azure_rm_common
- pass
-
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-AZURE_OBJECT_CLASS = 'WebApp'
-
-
-class AzureRMWebAppInfo(AzureRMModuleBase):
-
- def __init__(self):
-
- self.module_arg_spec = dict(
- name=dict(type='str'),
- resource_group=dict(type='str'),
- tags=dict(type='list'),
- return_publish_profile=dict(type='bool', default=False),
- )
-
- self.results = dict(
- changed=False,
- webapps=[],
- )
-
- self.name = None
- self.resource_group = None
- self.tags = None
- self.return_publish_profile = False
-
- self.framework_names = ['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']
-
- super(AzureRMWebAppInfo, self).__init__(self.module_arg_spec,
- supports_tags=False,
- facts_module=True)
-
- def exec_module(self, **kwargs):
- is_old_facts = self.module._name == 'azure_rm_webapp_facts'
- if is_old_facts:
- self.module.deprecate("The 'azure_rm_webapp_facts' module has been renamed to 'azure_rm_webapp_info'", version='2.13')
-
- for key in self.module_arg_spec:
- setattr(self, key, kwargs[key])
-
- if self.name:
- self.results['webapps'] = self.list_by_name()
- elif self.resource_group:
- self.results['webapps'] = self.list_by_resource_group()
- else:
- self.results['webapps'] = self.list_all()
-
- return self.results
-
- def list_by_name(self):
- self.log('Get web app {0}'.format(self.name))
- item = None
- result = []
-
- try:
- item = self.web_client.web_apps.get(self.resource_group, self.name)
- except CloudError:
- pass
-
- if item and self.has_tags(item.tags, self.tags):
- curated_result = self.get_curated_webapp(self.resource_group, self.name, item)
- result = [curated_result]
-
- return result
-
- def list_by_resource_group(self):
-        self.log('List web apps in resource group {0}'.format(self.resource_group))
- try:
- response = list(self.web_client.web_apps.list_by_resource_group(self.resource_group))
- except CloudError as exc:
- request_id = exc.request_id if exc.request_id else ''
- self.fail("Error listing web apps in resource groups {0}, request id: {1} - {2}".format(self.resource_group, request_id, str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- curated_output = self.get_curated_webapp(self.resource_group, item.name, item)
- results.append(curated_output)
- return results
-
- def list_all(self):
- self.log('List web apps in current subscription')
- try:
- response = list(self.web_client.web_apps.list())
- except CloudError as exc:
- request_id = exc.request_id if exc.request_id else ''
- self.fail("Error listing web apps, request id {0} - {1}".format(request_id, str(exc)))
-
- results = []
- for item in response:
- if self.has_tags(item.tags, self.tags):
- curated_output = self.get_curated_webapp(item.resource_group, item.name, item)
- results.append(curated_output)
- return results
-
- def list_webapp_configuration(self, resource_group, name):
- self.log('Get web app {0} configuration'.format(name))
-
- response = []
-
- try:
- response = self.web_client.web_apps.get_configuration(resource_group_name=resource_group, name=name)
- except CloudError as ex:
- request_id = ex.request_id if ex.request_id else ''
- self.fail('Error getting web app {0} configuration, request id {1} - {2}'.format(name, request_id, str(ex)))
-
- return response.as_dict()
-
- def list_webapp_appsettings(self, resource_group, name):
- self.log('Get web app {0} app settings'.format(name))
-
- response = []
-
- try:
- response = self.web_client.web_apps.list_application_settings(resource_group_name=resource_group, name=name)
- except CloudError as ex:
- request_id = ex.request_id if ex.request_id else ''
- self.fail('Error getting web app {0} app settings, request id {1} - {2}'.format(name, request_id, str(ex)))
-
- return response.as_dict()
-
- def get_publish_credentials(self, resource_group, name):
- self.log('Get web app {0} publish credentials'.format(name))
- try:
- poller = self.web_client.web_apps.list_publishing_credentials(resource_group, name)
- if isinstance(poller, LROPoller):
- response = self.get_poller_result(poller)
- except CloudError as ex:
- request_id = ex.request_id if ex.request_id else ''
-            self.fail('Error getting web app {0} publishing credentials, request id {1} - {2}'.format(name, request_id, str(ex)))
- return response
-
- def get_webapp_ftp_publish_url(self, resource_group, name):
- import xmltodict
-
- self.log('Get web app {0} app publish profile'.format(name))
-
- url = None
- try:
- content = self.web_client.web_apps.list_publishing_profile_xml_with_secrets(resource_group_name=resource_group, name=name)
- if not content:
- return url
-
- full_xml = ''
- for f in content:
- full_xml += f.decode()
- profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
-
- if not profiles:
- return url
-
- for profile in profiles:
- if profile['@publishMethod'] == 'FTP':
- url = profile['@publishUrl']
-
- except CloudError as ex:
-            self.fail('Error getting web app {0} publishing profile: {1}'.format(name, str(ex)))
-
- return url
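-
-    # Illustrative note, not part of the original module: each parsed profile above is
-    # expected to look roughly like
-    # {'@publishMethod': 'FTP', '@publishUrl': 'ftp://xxxx.ftp.azurewebsites.windows.net', ...},
-    # matching the ftp_publish_url sample documented in RETURN.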
-
- def get_curated_webapp(self, resource_group, name, webapp):
- pip = self.serialize_obj(webapp, AZURE_OBJECT_CLASS)
-
- try:
- site_config = self.list_webapp_configuration(resource_group, name)
- app_settings = self.list_webapp_appsettings(resource_group, name)
- publish_cred = self.get_publish_credentials(resource_group, name)
- ftp_publish_url = self.get_webapp_ftp_publish_url(resource_group, name)
- except CloudError as ex:
- pass
- return self.construct_curated_webapp(webapp=pip,
- configuration=site_config,
- app_settings=app_settings,
- deployment_slot=None,
- ftp_publish_url=ftp_publish_url,
- publish_credentials=publish_cred)
-
- def construct_curated_webapp(self,
- webapp,
- configuration=None,
- app_settings=None,
- deployment_slot=None,
- ftp_publish_url=None,
- publish_credentials=None):
- curated_output = dict()
- curated_output['id'] = webapp['id']
- curated_output['name'] = webapp['name']
- curated_output['resource_group'] = webapp['properties']['resourceGroup']
- curated_output['location'] = webapp['location']
- curated_output['plan'] = webapp['properties']['serverFarmId']
- curated_output['tags'] = webapp.get('tags', None)
-
-        # important properties from the output that do not match input arguments
- curated_output['app_state'] = webapp['properties']['state']
- curated_output['availability_state'] = webapp['properties']['availabilityState']
- curated_output['default_host_name'] = webapp['properties']['defaultHostName']
- curated_output['host_names'] = webapp['properties']['hostNames']
- curated_output['enabled'] = webapp['properties']['enabled']
- curated_output['enabled_host_names'] = webapp['properties']['enabledHostNames']
- curated_output['host_name_ssl_states'] = webapp['properties']['hostNameSslStates']
- curated_output['outbound_ip_addresses'] = webapp['properties']['outboundIpAddresses']
-
- # curated site_config
- if configuration:
- curated_output['frameworks'] = []
- for fx_name in self.framework_names:
- fx_version = configuration.get(fx_name + '_version', None)
- if fx_version:
- fx = {
- 'name': fx_name,
- 'version': fx_version
- }
- # java container setting
- if fx_name == 'java':
- if configuration['java_container'] and configuration['java_container_version']:
- settings = {
- 'java_container': configuration['java_container'].lower(),
- 'java_container_version': configuration['java_container_version']
- }
- fx['settings'] = settings
-
- curated_output['frameworks'].append(fx)
-
- # linux_fx_version
- if configuration.get('linux_fx_version', None):
- tmp = configuration.get('linux_fx_version').split("|")
- if len(tmp) == 2:
- curated_output['frameworks'].append({'name': tmp[0].lower(), 'version': tmp[1]})
-
- # curated app_settings
- if app_settings and app_settings.get('properties', None):
- curated_output['app_settings'] = dict()
- for item in app_settings['properties']:
- curated_output['app_settings'][item] = app_settings['properties'][item]
-
-        # curated deployment_slot
- if deployment_slot:
- curated_output['deployment_slot'] = deployment_slot
-
- # ftp_publish_url
- if ftp_publish_url:
- curated_output['ftp_publish_url'] = ftp_publish_url
-
- # curated publish credentials
- if publish_credentials and self.return_publish_profile:
- curated_output['publishing_username'] = publish_credentials.publishing_user_name
- curated_output['publishing_password'] = publish_credentials.publishing_password
- return curated_output
-
-
-def main():
- AzureRMWebAppInfo()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_webappslot.py b/lib/ansible/modules/cloud/azure/azure_rm_webappslot.py
deleted file mode 100644
index ddba710b9d..0000000000
--- a/lib/ansible/modules/cloud/azure/azure_rm_webappslot.py
+++ /dev/null
@@ -1,1058 +0,0 @@
-#!/usr/bin/python
-#
-# Copyright (c) 2018 Yunge Zhu, <yungez@microsoft.com>
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = '''
----
-module: azure_rm_webappslot
-version_added: "2.8"
-short_description: Manage Azure Web App slot
-description:
- - Create, update and delete Azure Web App slot.
-
-options:
- resource_group:
- description:
- - Name of the resource group to which the resource belongs.
- required: True
- name:
- description:
- - Unique name of the deployment slot to create or update.
- required: True
- webapp_name:
- description:
- - Web app name which this deployment slot belongs to.
- required: True
- location:
- description:
- - Resource location. If not set, location from the resource group will be used as default.
- configuration_source:
- description:
- - Source slot to clone configurations from when creating slot. Use webapp's name to refer to the production slot.
- auto_swap_slot_name:
- description:
-            - Used to configure the target slot name for auto swap, or to disable auto swap.
-            - Set it to the target slot name to enable auto swap.
-            - Set it to C(False) to disable auto slot swap.
- swap:
- description:
- - Swap deployment slots of a web app.
- suboptions:
- action:
- description:
- - Swap types.
- - C(preview) is to apply target slot settings on source slot first.
- - C(swap) is to complete swapping.
- - C(reset) is to reset the swap.
- choices:
- - preview
- - swap
- - reset
- default: preview
- target_slot:
- description:
- - Name of target slot to swap. If set to None, then swap with production slot.
- preserve_vnet:
- description:
- - C(True) to preserve virtual network to the slot during swap. Otherwise C(False).
- type: bool
- default: True
- frameworks:
- description:
- - Set of run time framework settings. Each setting is a dictionary.
- - See U(https://docs.microsoft.com/en-us/azure/app-service/app-service-web-overview) for more info.
- suboptions:
- name:
- description:
- - Name of the framework.
- - Supported framework list for Windows web app and Linux web app is different.
- - Windows web apps support C(java), C(net_framework), C(php), C(python), and C(node) from June 2018.
-                    - Windows web apps support multiple frameworks at the same time.
- - Linux web apps support C(java), C(ruby), C(php), C(dotnetcore), and C(node) from June 2018.
- - Linux web apps support only one framework.
- - Java framework is mutually exclusive with others.
- choices:
- - java
- - net_framework
- - php
- - python
- - ruby
- - dotnetcore
- - node
- version:
- description:
- - Version of the framework. For supported values on Linux web apps, see U(https://aka.ms/linux-stacks) for more info.
- - C(net_framework) supported value sample, C(v4.0) for .NET 4.6 and C(v3.0) for .NET 3.5.
- - C(php) supported value sample, C(5.5), C(5.6), C(7.0).
- - C(python) supported value sample, C(2.7), C(3.4).
- - C(node) supported value sample, C(6.6), C(6.9).
- - C(dotnetcore) supported value sample, C(1.0), C(1.1), C(1.2).
- - C(ruby) supported value sample, C(2.3).
- - C(java) supported value sample, C(1.9) for Windows web app. C(1.8) for Linux web app.
- settings:
- description:
- - List of settings of the framework.
- suboptions:
- java_container:
- description:
- - Name of the Java container. This is supported by the C(java) framework only, for example C(Tomcat), C(Jetty).
- java_container_version:
- description:
- - Version of the Java container. This is supported by the C(java) framework only.
- - For C(Tomcat), for example C(8.0), C(8.5), C(9.0). For C(Jetty), for example C(9.1), C(9.3).
- container_settings:
- description:
- - Web app slot container settings.
- suboptions:
- name:
- description:
- - Name of container, for example C(imagename:tag).
- registry_server_url:
- description:
- - Container registry server URL, for example C(mydockerregistry.io).
- registry_server_user:
- description:
- - The container registry server user name.
- registry_server_password:
- description:
- - The container registry server password.
- startup_file:
- description:
- - The slot startup file.
- - This only applies to Linux web app slots.
- app_settings:
- description:
- - Configure web app slot application settings. Suboptions are in key value pair format.
- purge_app_settings:
- description:
- - Purge any existing application settings. Replace slot application settings with app_settings.
- type: bool
- deployment_source:
- description:
- - Deployment source for git.
- suboptions:
- url:
- description:
- - Repository URL of deployment source.
- branch:
- description:
- - The branch name of the repository.
- app_state:
- description:
- - Start/Stop/Restart the slot.
- type: str
- choices:
- - started
- - stopped
- - restarted
- default: started
- state:
- description:
- - State of the Web App deployment slot.
- - Use C(present) to create or update a slot and C(absent) to delete it.
- default: present
- choices:
- - absent
- - present
-
-extends_documentation_fragment:
- - azure
- - azure_tags
-
-author:
- - Yunge Zhu(@yungezz)
-
-'''
-
-EXAMPLES = '''
- - name: Create a webapp slot
- azure_rm_webappslot:
- resource_group: myResourceGroup
- webapp_name: myJavaWebApp
- name: stage
- configuration_source: myJavaWebApp
- app_settings:
- testkey: testvalue
-
- - name: Swap the slot with the production slot
- azure_rm_webappslot:
- resource_group: myResourceGroup
- webapp_name: myJavaWebApp
- name: stage
- swap:
- action: swap
-
- - name: Stop the slot
- azure_rm_webappslot:
- resource_group: myResourceGroup
- webapp_name: myJavaWebApp
- name: stage
- app_state: stopped
-
- - name: Update a webapp slot's app settings
- azure_rm_webappslot:
- resource_group: myResourceGroup
- webapp_name: myJavaWebApp
- name: stage
- app_settings:
- testkey: testvalue2
-
- - name: Update a webapp slot's frameworks
- azure_rm_webappslot:
- resource_group: myResourceGroup
- webapp_name: myJavaWebApp
- name: stage
- frameworks:
- - name: "node"
- version: "10.1"
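-
- # Illustrative only - run the slot from a container image; the image name and
- # registry URL below are placeholders, not values taken from this repository.
- - name: Create a webapp slot from a container image
- azure_rm_webappslot:
- resource_group: myResourceGroup
- webapp_name: myJavaWebApp
- name: stage
- container_settings:
- name: imagename:tag
- registry_server_url: mydockerregistry.io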
-'''
-
-RETURN = '''
-id:
- description:
- - ID of current slot.
- returned: always
- type: str
- sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Web/sites/testapp/slots/stage1
-'''
-
-import time
-from ansible.module_utils.azure_rm_common import AzureRMModuleBase
-
-try:
- from msrestazure.azure_exceptions import CloudError
- from msrest.polling import LROPoller
- from msrest.serialization import Model
- from azure.mgmt.web.models import (
- site_config, app_service_plan, Site,
- AppServicePlan, SkuDescription, NameValuePair
- )
-except ImportError:
- # This is handled in azure_rm_common
- pass
-
-swap_spec = dict(
- action=dict(
- type='str',
- choices=[
- 'preview',
- 'swap',
- 'reset'
- ],
- default='preview'
- ),
- target_slot=dict(
- type='str'
- ),
- preserve_vnet=dict(
- type='bool',
- default=True
- )
-)
-
-container_settings_spec = dict(
- name=dict(type='str', required=True),
- registry_server_url=dict(type='str'),
- registry_server_user=dict(type='str'),
- registry_server_password=dict(type='str', no_log=True)
-)
-
-deployment_source_spec = dict(
- url=dict(type='str'),
- branch=dict(type='str')
-)
-
-
-framework_settings_spec = dict(
- java_container=dict(type='str', required=True),
- java_container_version=dict(type='str', required=True)
-)
-
-
-framework_spec = dict(
- name=dict(
- type='str',
- required=True,
- choices=['net_framework', 'java', 'php', 'node', 'python', 'dotnetcore', 'ruby']),
- version=dict(type='str', required=True),
- settings=dict(type='dict', options=framework_settings_spec)
-)
-
-
-def webapp_to_dict(webapp):
- return dict(
- id=webapp.id,
- name=webapp.name,
- location=webapp.location,
- client_cert_enabled=webapp.client_cert_enabled,
- enabled=webapp.enabled,
- reserved=webapp.reserved,
- client_affinity_enabled=webapp.client_affinity_enabled,
- server_farm_id=webapp.server_farm_id,
- host_names_disabled=webapp.host_names_disabled,
- https_only=webapp.https_only if hasattr(webapp, 'https_only') else None,
- skip_custom_domain_verification=webapp.skip_custom_domain_verification if hasattr(webapp, 'skip_custom_domain_verification') else None,
- ttl_in_seconds=webapp.ttl_in_seconds if hasattr(webapp, 'ttl_in_seconds') else None,
- state=webapp.state,
- tags=webapp.tags if webapp.tags else None
- )
-
-
-def slot_to_dict(slot):
- return dict(
- id=slot.id,
- resource_group=slot.resource_group,
- server_farm_id=slot.server_farm_id,
- target_swap_slot=slot.target_swap_slot,
- enabled_host_names=slot.enabled_host_names,
- slot_swap_status=slot.slot_swap_status,
- name=slot.name,
- location=slot.location,
- enabled=slot.enabled,
- reserved=slot.reserved,
- host_names_disabled=slot.host_names_disabled,
- state=slot.state,
- repository_site_name=slot.repository_site_name,
- default_host_name=slot.default_host_name,
- kind=slot.kind,
- site_config=slot.site_config,
- tags=slot.tags if slot.tags else None
- )
-
-
-class Actions:
- NoAction, CreateOrUpdate, UpdateAppSettings, Delete = range(4)
-
-
-class AzureRMWebAppSlots(AzureRMModuleBase):
- """Configuration class for an Azure RM Web App slot resource"""
-
- def __init__(self):
- self.module_arg_spec = dict(
- resource_group=dict(
- type='str',
- required=True
- ),
- name=dict(
- type='str',
- required=True
- ),
- webapp_name=dict(
- type='str',
- required=True
- ),
- location=dict(
- type='str'
- ),
- configuration_source=dict(
- type='str'
- ),
- auto_swap_slot_name=dict(
- type='raw'
- ),
- swap=dict(
- type='dict',
- options=swap_spec
- ),
- frameworks=dict(
- type='list',
- elements='dict',
- options=framework_spec
- ),
- container_settings=dict(
- type='dict',
- options=container_settings_spec
- ),
- deployment_source=dict(
- type='dict',
- options=deployment_source_spec
- ),
- startup_file=dict(
- type='str'
- ),
- app_settings=dict(
- type='dict'
- ),
- purge_app_settings=dict(
- type='bool',
- default=False
- ),
- app_state=dict(
- type='str',
- choices=['started', 'stopped', 'restarted'],
- default='started'
- ),
- state=dict(
- type='str',
- default='present',
- choices=['present', 'absent']
- )
- )
-
- mutually_exclusive = [['container_settings', 'frameworks']]
-
- self.resource_group = None
- self.name = None
- self.webapp_name = None
- self.location = None
-
- self.auto_swap_slot_name = None
- self.swap = None
- self.tags = None
- self.startup_file = None
- self.configuration_source = None
- self.clone = False
-
- # site config, e.g. app settings, ssl
- self.site_config = dict()
- self.app_settings = dict()
- self.app_settings_strDic = None
-
- # siteSourceControl
- self.deployment_source = dict()
-
- # site, used for creation or update.
- self.site = None
-
- # property for internal usage, not used for sdk
- self.container_settings = None
-
- self.purge_app_settings = False
- self.app_state = 'started'
-
- self.results = dict(
- changed=False,
- id=None,
- )
- self.state = None
- self.to_do = Actions.NoAction
-
- self.frameworks = None
-
- # set site_config value from kwargs
- self.site_config_updatable_frameworks = ["net_framework_version",
- "java_version",
- "php_version",
- "python_version",
- "linux_fx_version"]
-
- self.supported_linux_frameworks = ['ruby', 'php', 'dotnetcore', 'node', 'java']
- self.supported_windows_frameworks = ['net_framework', 'php', 'python', 'node', 'java']
-
- super(AzureRMWebAppSlots, self).__init__(derived_arg_spec=self.module_arg_spec,
- mutually_exclusive=mutually_exclusive,
- supports_check_mode=True,
- supports_tags=True)
-
- def exec_module(self, **kwargs):
- """Main module execution method"""
-
- for key in list(self.module_arg_spec.keys()) + ['tags']:
- if hasattr(self, key):
- setattr(self, key, kwargs[key])
- elif kwargs[key] is not None:
- if key == "scm_type":
- self.site_config[key] = kwargs[key]
-
- old_response = None
- response = None
- to_be_updated = False
-
- # set location
- resource_group = self.get_resource_group(self.resource_group)
- if not self.location:
- self.location = resource_group.location
-
- # get web app
- webapp_response = self.get_webapp()
-
- if not webapp_response:
- self.fail("Web app {0} does not exist in resource group {1}.".format(self.webapp_name, self.resource_group))
-
- # get slot
- old_response = self.get_slot()
-
- # set is_linux
- is_linux = True if webapp_response['reserved'] else False
-
- if self.state == 'present':
- if self.frameworks:
- # java is mutually exclusive with other frameworks
- if len(self.frameworks) > 1 and any(f['name'] == 'java' for f in self.frameworks):
- self.fail('Java is mutually exclusive with other frameworks.')
-
- if is_linux:
- if len(self.frameworks) != 1:
- self.fail('Only one framework can be specified for a Linux web app.')
-
- if self.frameworks[0]['name'] not in self.supported_linux_frameworks:
- self.fail('Unsupported framework {0} for Linux web app.'.format(self.frameworks[0]['name']))
-
- self.site_config['linux_fx_version'] = (self.frameworks[0]['name'] + '|' + self.frameworks[0]['version']).upper()
-
- if self.frameworks[0]['name'] == 'java':
- if self.frameworks[0]['version'] != '8':
- self.fail("Linux web app only supports java 8.")
-
- if self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \
- self.frameworks[0]['settings']['java_container'].lower() != 'tomcat':
- self.fail("Linux web app only supports tomcat container.")
-
- if self.frameworks[0].get('settings', {}) and self.frameworks[0]['settings'].get('java_container', None) and \
- self.frameworks[0]['settings']['java_container'].lower() == 'tomcat':
- self.site_config['linux_fx_version'] = 'TOMCAT|' + self.frameworks[0]['settings']['java_container_version'] + '-jre8'
- else:
- self.site_config['linux_fx_version'] = 'JAVA|8-jre8'
- else:
- for fx in self.frameworks:
- if fx.get('name') not in self.supported_windows_frameworks:
- self.fail('Unsupported framework {0} for Windows web app.'.format(fx.get('name')))
- else:
- self.site_config[fx.get('name') + '_version'] = fx.get('version')
-
- if 'settings' in fx and fx['settings'] is not None:
- for key, value in fx['settings'].items():
- self.site_config[key] = value
-
- if not self.app_settings:
- self.app_settings = dict()
-
- if self.container_settings:
- linux_fx_version = 'DOCKER|'
-
- if self.container_settings.get('registry_server_url'):
- self.app_settings['DOCKER_REGISTRY_SERVER_URL'] = 'https://' + self.container_settings['registry_server_url']
-
- linux_fx_version += self.container_settings['registry_server_url'] + '/'
-
- linux_fx_version += self.container_settings['name']
-
- self.site_config['linux_fx_version'] = linux_fx_version
-
- if self.container_settings.get('registry_server_user'):
- self.app_settings['DOCKER_REGISTRY_SERVER_USERNAME'] = self.container_settings['registry_server_user']
-
- if self.container_settings.get('registry_server_password'):
- self.app_settings['DOCKER_REGISTRY_SERVER_PASSWORD'] = self.container_settings['registry_server_password']
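-
- # For example (illustrative), container_settings with name 'imagename:tag' and
- # registry_server_url 'mydockerregistry.io' yield
- # linux_fx_version 'DOCKER|mydockerregistry.io/imagename:tag' and the app setting
- # DOCKER_REGISTRY_SERVER_URL 'https://mydockerregistry.io'.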
-
- # set auto_swap_slot_name
- if self.auto_swap_slot_name and isinstance(self.auto_swap_slot_name, str):
- self.site_config['auto_swap_slot_name'] = self.auto_swap_slot_name
- if self.auto_swap_slot_name is False:
- self.site_config['auto_swap_slot_name'] = None
-
- # init site
- self.site = Site(location=self.location, site_config=self.site_config)
-
- # check if the slot already present in the webapp
- if not old_response:
- self.log("Web App slot doesn't exist")
-
- to_be_updated = True
- self.to_do = Actions.CreateOrUpdate
- self.site.tags = self.tags
-
- # if linux, setup startup_file
- if self.startup_file:
- self.site_config['app_command_line'] = self.startup_file
-
- # set app setting
- if self.app_settings:
- app_settings = []
- for key in self.app_settings.keys():
- app_settings.append(NameValuePair(name=key, value=self.app_settings[key]))
-
- self.site_config['app_settings'] = app_settings
-
- # clone slot
- if self.configuration_source:
- self.clone = True
-
- else:
- # existing slot, do update
- self.log("Web App slot already exists")
-
- self.log('Result: {0}'.format(old_response))
-
- update_tags, self.site.tags = self.update_tags(old_response.get('tags', None))
-
- if update_tags:
- to_be_updated = True
-
- # check if site_config changed
- old_config = self.get_configuration_slot(self.name)
-
- if self.is_site_config_changed(old_config):
- to_be_updated = True
- self.to_do = Actions.CreateOrUpdate
-
- self.app_settings_strDic = self.list_app_settings_slot(self.name)
-
- # purge existing app_settings:
- if self.purge_app_settings:
- to_be_updated = True
- self.to_do = Actions.UpdateAppSettings
- self.app_settings_strDic = dict()
-
- # check if app settings changed
- if self.purge_app_settings or self.is_app_settings_changed():
- to_be_updated = True
- self.to_do = Actions.UpdateAppSettings
-
- if self.app_settings:
- for key in self.app_settings.keys():
- self.app_settings_strDic[key] = self.app_settings[key]
-
- elif self.state == 'absent':
- if old_response:
- self.log("Delete Web App slot")
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- self.delete_slot()
-
- self.log('Web App slot deleted')
-
- else:
- self.log("Web app slot {0} not exists.".format(self.name))
-
- if to_be_updated:
- self.log('Need to create/update the web app slot')
- self.results['changed'] = True
-
- if self.check_mode:
- return self.results
-
- if self.to_do == Actions.CreateOrUpdate:
- response = self.create_update_slot()
-
- self.results['id'] = response['id']
-
- if self.clone:
- self.clone_slot()
-
- if self.to_do == Actions.UpdateAppSettings:
- self.update_app_settings_slot()
-
- slot = None
- if response:
- slot = response
- if old_response:
- slot = old_response
-
- if slot:
- if (slot['state'] != 'Stopped' and self.app_state == 'stopped') or \
- (slot['state'] != 'Running' and self.app_state == 'started') or \
- self.app_state == 'restarted':
-
- self.results['changed'] = True
- if self.check_mode:
- return self.results
-
- self.set_state_slot(self.app_state)
-
- if self.swap:
- self.results['changed'] = True
- if self.check_mode:
- return self.results
-
- self.swap_slot()
-
- return self.results
-
- # compare site config
- def is_site_config_changed(self, existing_config):
- for fx_version in self.site_config_updatable_frameworks:
- if self.site_config.get(fx_version):
- if not getattr(existing_config, fx_version) or \
- getattr(existing_config, fx_version).upper() != self.site_config.get(fx_version).upper():
- return True
-
- if self.auto_swap_slot_name is False and existing_config.auto_swap_slot_name is not None:
- return True
- elif self.auto_swap_slot_name and self.auto_swap_slot_name != getattr(existing_config, 'auto_swap_slot_name', None):
- return True
- return False
-
- # comparing existing app setting with input, determine whether it's changed
- def is_app_settings_changed(self):
- if self.app_settings:
- if len(self.app_settings_strDic) != len(self.app_settings):
- return True
-
- if self.app_settings_strDic != self.app_settings:
- return True
- return False
-
- # comparing deployment source with input, determine whether it's changed
- def is_deployment_source_changed(self, existing_webapp):
- if self.deployment_source:
- if self.deployment_source.get('url') \
- and self.deployment_source['url'] != existing_webapp.get('site_source_control')['url']:
- return True
-
- if self.deployment_source.get('branch') \
- and self.deployment_source['branch'] != existing_webapp.get('site_source_control')['branch']:
- return True
-
- return False
-
- def create_update_slot(self):
- '''
- Creates or updates Web App slot with the specified configuration.
-
- :return: deserialized Web App instance state dictionary
- '''
- self.log(
- "Creating / Updating the Web App slot {0}".format(self.name))
-
- try:
- response = self.web_client.web_apps.create_or_update_slot(resource_group_name=self.resource_group,
- slot=self.name,
- name=self.webapp_name,
- site_envelope=self.site)
- if isinstance(response, LROPoller):
- response = self.get_poller_result(response)
-
- except CloudError as exc:
- self.log('Error attempting to create the Web App slot instance.')
- self.fail("Error creating the Web App slot: {0}".format(str(exc)))
- return slot_to_dict(response)
-
- def delete_slot(self):
- '''
- Deletes specified Web App slot in the specified subscription and resource group.
-
- :return: True
- '''
- self.log("Deleting the Web App slot {0}".format(self.name))
- try:
- response = self.web_client.web_apps.delete_slot(resource_group_name=self.resource_group,
- name=self.webapp_name,
- slot=self.name)
- except CloudError as e:
- self.log('Error attempting to delete the Web App slot.')
- self.fail(
- "Error deleting the Web App slots: {0}".format(str(e)))
-
- return True
-
- def get_webapp(self):
- '''
- Gets the properties of the specified Web App.
-
- :return: deserialized Web App instance state dictionary
- '''
- self.log(
- "Checking if the Web App instance {0} is present".format(self.webapp_name))
-
- response = None
-
- try:
- response = self.web_client.web_apps.get(resource_group_name=self.resource_group,
- name=self.webapp_name)
-
- # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
- if response is not None:
- self.log("Response : {0}".format(response))
- self.log("Web App instance : {0} found".format(response.name))
- return webapp_to_dict(response)
-
- except CloudError as ex:
- pass
-
- self.log("Didn't find web app {0} in resource group {1}".format(
- self.webapp_name, self.resource_group))
-
- return False
-
- def get_slot(self):
- '''
- Gets the properties of the specified Web App slot.
-
- :return: deserialized Web App slot state dictionary
- '''
- self.log(
- "Checking if the Web App slot {0} is present".format(self.name))
-
- response = None
-
- try:
- response = self.web_client.web_apps.get_slot(resource_group_name=self.resource_group,
- name=self.webapp_name,
- slot=self.name)
-
- # Newer SDK versions (0.40.0+) seem to return None if it doesn't exist instead of raising CloudError
- if response is not None:
- self.log("Response : {0}".format(response))
- self.log("Web App slot: {0} found".format(response.name))
- return slot_to_dict(response)
-
- except CloudError as ex:
- pass
-
- self.log("Does not find web app slot {0} in resource group {1}".format(self.name, self.resource_group))
-
- return False
-
- def list_app_settings(self):
- '''
- List webapp application settings
- :return: deserialized list response
- '''
- self.log("List webapp application setting")
-
- try:
-
- response = self.web_client.web_apps.list_application_settings(
- resource_group_name=self.resource_group, name=self.webapp_name)
- self.log("Response : {0}".format(response))
-
- return response.properties
- except CloudError as ex:
- self.fail("Failed to list application settings for web app {0} in resource group {1}: {2}".format(
- self.name, self.resource_group, str(ex)))
-
- def list_app_settings_slot(self, slot_name):
- '''
- List application settings
- :return: deserialized list response
- '''
- self.log("List application setting")
-
- try:
-
- response = self.web_client.web_apps.list_application_settings_slot(
- resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name)
- self.log("Response : {0}".format(response))
-
- return response.properties
- except CloudError as ex:
- self.fail("Failed to list application settings for web app slot {0} in resource group {1}: {2}".format(
- self.name, self.resource_group, str(ex)))
-
- def update_app_settings_slot(self, slot_name=None, app_settings=None):
- '''
- Update application settings
- :return: deserialized updating response
- '''
- self.log("Update application setting")
-
- if slot_name is None:
- slot_name = self.name
- if app_settings is None:
- app_settings = self.app_settings_strDic
- try:
- response = self.web_client.web_apps.update_application_settings_slot(resource_group_name=self.resource_group,
- name=self.webapp_name,
- slot=slot_name,
- kind=None,
- properties=app_settings)
- self.log("Response : {0}".format(response))
-
- return response.as_dict()
- except CloudError as ex:
- self.fail("Failed to update application settings for web app slot {0} in resource group {1}: {2}".format(
- self.name, self.resource_group, str(ex)))
-
- return response
-
- def create_or_update_source_control_slot(self):
- '''
- Update site source control
- :return: deserialized updating response
- '''
- self.log("Update site source control")
-
- if self.deployment_source is None:
- return False
-
- self.deployment_source['is_manual_integration'] = False
- self.deployment_source['is_mercurial'] = False
-
- try:
- response = self.web_client.web_apps.create_or_update_source_control_slot(
- resource_group_name=self.resource_group,
- name=self.webapp_name,
- site_source_control=self.deployment_source,
- slot=self.name)
- self.log("Response : {0}".format(response))
-
- return response.as_dict()
- except CloudError as ex:
- self.fail("Failed to update site source control for web app slot {0} in resource group {1}: {2}".format(
- self.name, self.resource_group, str(ex)))
-
- def get_configuration(self):
- '''
- Get web app configuration
- :return: deserialized web app configuration response
- '''
- self.log("Get web app configuration")
-
- try:
-
- response = self.web_client.web_apps.get_configuration(
- resource_group_name=self.resource_group, name=self.webapp_name)
- self.log("Response : {0}".format(response))
-
- return response
- except CloudError as ex:
- self.fail("Failed to get configuration for web app {0} in resource group {1}: {2}".format(
- self.webapp_name, self.resource_group, str(ex)))
-
- def get_configuration_slot(self, slot_name):
- '''
- Get slot configuration
- :return: deserialized slot configuration response
- '''
- self.log("Get web app slot configuration")
-
- try:
-
- response = self.web_client.web_apps.get_configuration_slot(
- resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name)
- self.log("Response : {0}".format(response))
-
- return response
- except CloudError as ex:
- self.fail("Failed to get configuration for web app slot {0} in resource group {1}: {2}".format(
- slot_name, self.resource_group, str(ex)))
-
- def update_configuration_slot(self, slot_name=None, site_config=None):
- '''
- Update slot configuration
- :return: deserialized slot configuration response
- '''
- self.log("Update web app slot configuration")
-
- if slot_name is None:
- slot_name = self.name
- if site_config is None:
- site_config = self.site_config
- try:
-
- response = self.web_client.web_apps.update_configuration_slot(
- resource_group_name=self.resource_group, name=self.webapp_name, slot=slot_name, site_config=site_config)
- self.log("Response : {0}".format(response))
-
- return response
- except CloudError as ex:
- self.fail("Failed to update configuration for web app slot {0} in resource group {1}: {2}".format(
- slot_name, self.resource_group, str(ex)))
-
- def set_state_slot(self, appstate):
- '''
- Start/stop/restart web app slot
- :return: deserialized updating response
- '''
- try:
- if appstate == 'started':
- response = self.web_client.web_apps.start_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
- elif appstate == 'stopped':
- response = self.web_client.web_apps.stop_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
- elif appstate == 'restarted':
- response = self.web_client.web_apps.restart_slot(resource_group_name=self.resource_group, name=self.webapp_name, slot=self.name)
- else:
- self.fail("Invalid web app slot state {0}".format(appstate))
-
- self.log("Response : {0}".format(response))
-
- return response
- except CloudError as ex:
- request_id = ex.request_id if ex.request_id else ''
- self.fail("Failed to {0} web app slot {1} in resource group {2}, request_id {3} - {4}".format(
- appstate, self.name, self.resource_group, request_id, str(ex)))
-
- def swap_slot(self):
- '''
- Swap slot
- :return: deserialized response
- '''
- self.log("Swap slot")
-
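- # As implemented below (for reference): action 'swap' calls swap_slot_with_production
- # or swap_slot_slot, 'preview' calls apply_slot_config_to_production or
- # apply_slot_configuration_slot, and 'reset' calls reset_production_slot_config or
- # reset_slot_configuration_slot, depending on whether target_slot is set.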
- try:
- if self.swap['action'] == 'swap':
- if self.swap['target_slot'] is None:
- response = self.web_client.web_apps.swap_slot_with_production(resource_group_name=self.resource_group,
- name=self.webapp_name,
- target_slot=self.name,
- preserve_vnet=self.swap['preserve_vnet'])
- else:
- response = self.web_client.web_apps.swap_slot_slot(resource_group_name=self.resource_group,
- name=self.webapp_name,
- slot=self.name,
- target_slot=self.swap['target_slot'],
- preserve_vnet=self.swap['preserve_vnet'])
- elif self.swap['action'] == 'preview':
- if self.swap['target_slot'] is None:
- response = self.web_client.web_apps.apply_slot_config_to_production(resource_group_name=self.resource_group,
- name=self.webapp_name,
- target_slot=self.name,
- preserve_vnet=self.swap['preserve_vnet'])
- else:
- response = self.web_client.web_apps.apply_slot_configuration_slot(resource_group_name=self.resource_group,
- name=self.webapp_name,
- slot=self.name,
- target_slot=self.swap['target_slot'],
- preserve_vnet=self.swap['preserve_vnet'])
- elif self.swap['action'] == 'reset':
- if self.swap['target_slot'] is None:
- response = self.web_client.web_apps.reset_production_slot_config(resource_group_name=self.resource_group,
- name=self.webapp_name)
- else:
- response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group,
- name=self.webapp_name,
- slot=self.swap['target_slot'])
- response = self.web_client.web_apps.reset_slot_configuration_slot(resource_group_name=self.resource_group,
- name=self.webapp_name,
- slot=self.name)
-
- self.log("Response : {0}".format(response))
-
- return response
- except CloudError as ex:
- self.fail("Failed to swap web app slot {0} in resource group {1}: {2}".format(self.name, self.resource_group, str(ex)))
-
- def clone_slot(self):
- if self.configuration_source:
- src_slot = None if self.configuration_source.lower() == self.webapp_name.lower() else self.configuration_source
-
- if src_slot is None:
- site_config_clone_from = self.get_configuration()
- else:
- site_config_clone_from = self.get_configuration_slot(slot_name=src_slot)
-
- self.update_configuration_slot(site_config=site_config_clone_from)
-
- if src_slot is None:
- app_setting_clone_from = self.list_app_settings()
- else:
- app_setting_clone_from = self.list_app_settings_slot(src_slot)
-
- if self.app_settings:
- app_setting_clone_from.update(self.app_settings)
-
- self.update_app_settings_slot(app_settings=app_setting_clone_from)
-
-
-def main():
- """Main execution"""
- AzureRMWebAppSlots()
-
-
-if __name__ == '__main__':
- main()
diff --git a/lib/ansible/plugins/doc_fragments/azure.py b/lib/ansible/plugins/doc_fragments/azure.py
deleted file mode 100644
index c990a05641..0000000000
--- a/lib/ansible/plugins/doc_fragments/azure.py
+++ /dev/null
@@ -1,113 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016 Matt Davis, <mdavis@ansible.com>
-# Copyright: (c) 2016 Chris Houseknecht, <house@redhat.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-
-class ModuleDocFragment(object):
-
- # Azure doc fragment
- DOCUMENTATION = r'''
-
-options:
- ad_user:
- description:
- - Active Directory username. Use when authenticating with an Active Directory user rather than service
- principal.
- type: str
- password:
- description:
- - Active Directory user password. Use when authenticating with an Active Directory user rather than service
- principal.
- type: str
- profile:
- description:
- - Security profile found in ~/.azure/credentials file.
- type: str
- subscription_id:
- description:
- - Your Azure subscription Id.
- type: str
- client_id:
- description:
- - Azure client ID. Use when authenticating with a Service Principal.
- type: str
- secret:
- description:
- - Azure client secret. Use when authenticating with a Service Principal.
- type: str
- tenant:
- description:
- - Azure tenant ID. Use when authenticating with a Service Principal.
- type: str
- cloud_environment:
- description:
- - For cloud environments other than the US public cloud, the environment name (as defined by Azure Python SDK, eg, C(AzureChinaCloud),
- C(AzureUSGovernment)), or a metadata discovery endpoint URL (required for Azure Stack). Can also be set via credential file profile or
- the C(AZURE_CLOUD_ENVIRONMENT) environment variable.
- type: str
- default: AzureCloud
- version_added: '2.4'
- adfs_authority_url:
- description:
- - Azure AD authority url. Use when authenticating with Username/password, and has your own ADFS authority.
- type: str
- version_added: '2.6'
- cert_validation_mode:
- description:
- - Controls the certificate validation behavior for Azure endpoints. By default, all modules will validate the server certificate, but
- when an HTTPS proxy is in use, or against Azure Stack, it may be necessary to disable this behavior by passing C(ignore). Can also be
- set via credential file profile or the C(AZURE_CERT_VALIDATION) environment variable.
- type: str
- choices: [ ignore, validate ]
- version_added: '2.5'
- auth_source:
- description:
- - Controls the source of the credentials to use for authentication.
- - If not specified, ANSIBLE_AZURE_AUTH_SOURCE environment variable will be used and default to C(auto) if variable is not defined.
- - C(auto) will follow the default precedence of module parameters -> environment variables -> default profile in credential file
- C(~/.azure/credentials).
- - When set to C(cli), the credentials will be sourced from the default Azure CLI profile.
- - Can also be set via the C(ANSIBLE_AZURE_AUTH_SOURCE) environment variable.
- - When set to C(msi), the host machine must be an Azure resource with an enabled MSI extension. C(subscription_id) or the
- environment variable C(AZURE_SUBSCRIPTION_ID) can be used to identify the subscription ID if the resource is granted
- access to more than one subscription, otherwise the first subscription is chosen.
- - The C(msi) option was added in Ansible 2.6.
- type: str
- choices:
- - auto
- - cli
- - credential_file
- - env
- - msi
- version_added: '2.5'
- api_profile:
- description:
- - Selects an API profile to use when communicating with Azure services. Default value of C(latest) is appropriate for public clouds;
- future values will allow use with Azure Stack.
- type: str
- default: latest
- version_added: '2.5'
-requirements:
- - python >= 2.7
- - azure >= 2.0.0
-
-notes:
- - For authentication with Azure you can pass parameters, set environment variables, use a profile stored
- in ~/.azure/credentials, or log in before you run your tasks or playbook with C(az login).
- - Authentication is also possible using a service principal or Active Directory user.
- - To authenticate via service principal, pass subscription_id, client_id, secret and tenant or set environment
- variables AZURE_SUBSCRIPTION_ID, AZURE_CLIENT_ID, AZURE_SECRET and AZURE_TENANT.
- - To authenticate via Active Directory user, pass ad_user and password, or set AZURE_AD_USER and
- AZURE_PASSWORD in the environment.
- - "Alternatively, credentials can be stored in ~/.azure/credentials. This is an ini file containing
- a [default] section and the following keys: subscription_id, client_id, secret and tenant or
- subscription_id, ad_user and password. It is also possible to add additional profiles. Specify the profile
- by passing profile or setting AZURE_PROFILE in the environment."
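-
- # Illustrative ~/.azure/credentials file for a service principal (all values below are placeholders):
- # [default]
- # subscription_id=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- # client_id=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- # secret=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- # tenant=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx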
-
-seealso:
- - name: Sign in with Azure CLI
- link: https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest
- description: How to authenticate using the C(az login) command.
- '''
diff --git a/lib/ansible/plugins/doc_fragments/azure_tags.py b/lib/ansible/plugins/doc_fragments/azure_tags.py
deleted file mode 100644
index ea4268c8ca..0000000000
--- a/lib/ansible/plugins/doc_fragments/azure_tags.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2016, Matt Davis, <mdavis@ansible.com>
-# Copyright: (c) 2016, Chris Houseknecht, <house@redhat.com>
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-
-class ModuleDocFragment(object):
-
- # Azure doc fragment
- DOCUMENTATION = r'''
-options:
- tags:
- description:
- - Dictionary of string:string pairs to assign as metadata to the object.
- - Metadata tags on the object will be updated with any provided values.
- - To remove tags set append_tags option to false.
- type: dict
- append_tags:
- description:
- - Use to control if tags field is canonical or just appends to existing tags.
- - When canonical, any tags not found in the tags parameter will be removed from the object's metadata.
- type: bool
- default: yes
- '''
diff --git a/lib/ansible/plugins/inventory/azure_rm.py b/lib/ansible/plugins/inventory/azure_rm.py
deleted file mode 100644
index f6ffdaddf4..0000000000
--- a/lib/ansible/plugins/inventory/azure_rm.py
+++ /dev/null
@@ -1,645 +0,0 @@
-# Copyright (c) 2018 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = r'''
- name: azure_rm
- plugin_type: inventory
- short_description: Azure Resource Manager inventory plugin
- extends_documentation_fragment:
- - azure
- description:
- - Query VM details from Azure Resource Manager
- - Requires a YAML configuration file whose name ends with 'azure_rm.(yml|yaml)'
- - By default, sets C(ansible_host) to the first public IP address found (preferring the primary NIC). If no
- public IPs are found, the first private IP (also preferring the primary NIC). The default may be overridden
- via C(hostvar_expressions); see examples.
- options:
- plugin:
- description: marks this as an instance of the 'azure_rm' plugin
- required: true
- choices: ['azure_rm']
- include_vm_resource_groups:
- description: A list of resource group names to search for virtual machines. '\*' will include all resource
- groups in the subscription.
- default: ['*']
- include_vmss_resource_groups:
- description: A list of resource group names to search for virtual machine scale sets (VMSSs). '\*' will
- include all resource groups in the subscription.
- default: []
- fail_on_template_errors:
- description: When false, template failures during group and filter processing are silently ignored (eg,
- if a filter or group expression refers to an undefined host variable)
- choices: [True, False]
- default: True
- keyed_groups:
- description: Creates groups based on the value of a host variable. Requires a list of dictionaries,
- defining C(key) (the source dictionary-typed variable), C(prefix) (the prefix to use for the new group
- name), and optionally C(separator) (which defaults to C(_))
- conditional_groups:
- description: A mapping of group names to Jinja2 expressions. When the mapped expression is true, the host
- is added to the named group.
- hostvar_expressions:
- description: A mapping of hostvar names to Jinja2 expressions. The value for each host is the result of the
- Jinja2 expression (which may refer to any of the host's existing variables at the time this inventory
- plugin runs).
- exclude_host_filters:
- description: Excludes hosts from the inventory with a list of Jinja2 conditional expressions. Each
- expression in the list is evaluated for each host; when the expression is true, the host is excluded
- from the inventory.
- default: []
- batch_fetch:
- description: To improve performance, results are fetched using an unsupported batch API. Disabling
- C(batch_fetch) uses a much slower serial fetch, resulting in many more round-trips. Generally only
- useful for troubleshooting.
- default: true
- default_host_filters:
- description: A default set of filters that is applied in addition to the conditions in
- C(exclude_host_filters) to exclude powered-off and not-fully-provisioned hosts. Set this to a different
- value or empty list if you need to include hosts in these states.
- default: ['powerstate != "running"', 'provisioning_state != "succeeded"']
- use_contrib_script_compatible_sanitization:
- description:
- - By default this plugin uses a general group name sanitization to create safe and usable group names for use in Ansible.
- This option allows you to override that, in an effort to ease migration from the old inventory script, and
- matches the sanitization of groups when the script's ``replace_dash_in_groups`` option is set to ``False``.
- To replicate behavior of ``replace_dash_in_groups = True`` with constructed groups,
- you will need to replace hyphens with underscores via the regex_replace filter for those entries.
- - For this to work you should also turn off the TRANSFORM_INVALID_GROUP_CHARS setting,
- otherwise the core engine will just use the standard sanitization on top.
- - This is not the default because such names can break certain functionality, since group names end up being
- used as Python identifiers and not all characters are valid in them.
- type: bool
- default: False
- version_added: '2.8'
- plain_host_names:
- description:
- - By default this plugin will use globally unique host names.
- This option allows you to override that, and use the name that matches the old inventory script naming.
- - This is not the default, as these names are not truly unique, and can conflict with other hosts.
- The default behavior will add extra hashing to the end of the hostname to prevent such conflicts.
- type: bool
- default: False
- version_added: '2.8'
-'''
-
-EXAMPLES = '''
-# The following host variables are always available:
-# public_ipv4_addresses: all public IP addresses, with the primary IP config from the primary NIC first
-# public_dns_hostnames: all public DNS hostnames, with the primary IP config from the primary NIC first
-# private_ipv4_addresses: all private IP addresses, with the primary IP config from the primary NIC first
-# id: the VM's Azure resource ID, eg /subscriptions/00000000-0000-0000-1111-1111aaaabb/resourceGroups/my_rg/providers/Microsoft.Compute/virtualMachines/my_vm
-# location: the VM's Azure location, eg 'westus', 'eastus'
-# name: the VM's resource name, eg 'myvm'
-# os_profile: The VM OS properties, a dictionary, only system is currently available, eg 'os_profile.system not in ['linux']'
-# powerstate: the VM's current power state, eg: 'running', 'stopped', 'deallocated'
-# provisioning_state: the VM's current provisioning state, eg: 'succeeded'
-# tags: dictionary of the VM's defined tag values
-# resource_type: the VM's resource type, eg: 'Microsoft.Compute/virtualMachine', 'Microsoft.Compute/virtualMachineScaleSets/virtualMachines'
-# vmid: the VM's internal SMBIOS ID, eg: '36bca69d-c365-4584-8c06-a62f4a1dc5d2'
-# vmss: if the VM is a member of a scaleset (vmss), a dictionary including the id and name of the parent scaleset
-
-
-# sample 'myazuresub.azure_rm.yaml'
-
-# required for all azure_rm inventory plugin configs
-plugin: azure_rm
-
-# forces this plugin to use a CLI auth session instead of the automatic auth source selection (eg, prevents the
-# presence of 'ANSIBLE_AZURE_RM_X' environment variables from overriding CLI auth)
-auth_source: cli
-
-# fetches VMs from an explicit list of resource groups instead of default all (- '*')
-include_vm_resource_groups:
-- myrg1
-- myrg2
-
-# fetches VMs from VMSSs in all resource groups (defaults to no VMSS fetch)
-include_vmss_resource_groups:
-- '*'
-
-# places a host in the named group if the associated condition evaluates to true
-conditional_groups:
- # since this will be true for every host, every host sourced from this inventory plugin config will be in the
- # group 'all_the_hosts'
- all_the_hosts: true
- # if the VM's "name" variable contains "dbserver", it will be placed in the 'db_hosts' group
- db_hosts: "'dbserver' in name"
-
-# adds variables to each host found by this inventory plugin, whose values are the result of the associated expression
-hostvar_expressions:
- my_host_var:
- # A statically-valued expression has to be both single and double-quoted, or use escaped quotes, since the outer
- # layer of quotes will be consumed by YAML. Without the second set of quotes, it interprets 'staticvalue' as a
- # variable instead of a string literal.
- some_statically_valued_var: "'staticvalue'"
- # overrides the default ansible_host value with a custom Jinja2 expression, in this case, the first DNS hostname, or
- # if none are found, the first public IP address.
- ansible_host: (public_dns_hostnames + public_ipv4_addresses) | first
-
-# places hosts in dynamically-created groups based on a variable value.
-keyed_groups:
-# places each host in a group named 'tag_(tag name)_(tag value)' for each tag on a VM.
-- prefix: tag
- key: tags
-# places each host in a group named 'azure_loc_(location name)', depending on the VM's location
-- prefix: azure_loc
- key: location
-# places host in a group named 'some_tag_X' using the value of the 'sometag' tag on a VM as X, and defaulting to the
-# value 'none' (eg, the group 'some_tag_none') if the 'sometag' tag is not defined for a VM.
-- prefix: some_tag
- key: tags.sometag | default('none')
-
-# excludes a host from the inventory when any of these expressions is true, can refer to any vars defined on the host
-exclude_host_filters:
-# excludes hosts in the eastus region
-- location in ['eastus']
-# excludes hosts that are powered off
-- powerstate != 'running'
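-# excludes hosts based on a tag value (illustrative; assumes the VM has an 'environment' tag)
-- tags.environment | default('') == 'dev'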
-'''
-
-# FUTURE: do we need a set of sane default filters, separate from the user-definable ones?
-# eg, powerstate==running, provisioning_state==succeeded
-
-
-import hashlib
-import json
-import re
-import uuid
-
-try:
- from queue import Queue, Empty
-except ImportError:
- from Queue import Queue, Empty
-
-from collections import namedtuple
-from ansible import release
-from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
-from ansible.module_utils.six import iteritems
-from ansible.module_utils.azure_rm_common import AzureRMAuth
-from ansible.errors import AnsibleParserError, AnsibleError
-from ansible.module_utils.parsing.convert_bool import boolean
-from ansible.module_utils._text import to_native, to_bytes
-from itertools import chain
-from msrest import ServiceClient, Serializer, Deserializer
-from msrestazure import AzureConfiguration
-from msrestazure.polling.arm_polling import ARMPolling
-from msrestazure.tools import parse_resource_id
-
-
-class AzureRMRestConfiguration(AzureConfiguration):
- def __init__(self, credentials, subscription_id, base_url=None):
-
- if credentials is None:
- raise ValueError("Parameter 'credentials' must not be None.")
- if subscription_id is None:
- raise ValueError("Parameter 'subscription_id' must not be None.")
- if not base_url:
- base_url = 'https://management.azure.com'
-
- super(AzureRMRestConfiguration, self).__init__(base_url)
-
- self.add_user_agent('ansible-dynamic-inventory/{0}'.format(release.__version__))
-
- self.credentials = credentials
- self.subscription_id = subscription_id
-
-
-UrlAction = namedtuple('UrlAction', ['url', 'api_version', 'handler', 'handler_args'])
-
-
-# FUTURE: add Cacheable support once we have a sane serialization format
-class InventoryModule(BaseInventoryPlugin, Constructable):
-
- NAME = 'azure_rm'
-
- def __init__(self):
- super(InventoryModule, self).__init__()
-
- self._serializer = Serializer()
- self._deserializer = Deserializer()
- self._hosts = []
- self._filters = None
-
- # FUTURE: use API profiles with defaults
- self._compute_api_version = '2017-03-30'
- self._network_api_version = '2015-06-15'
-
- self._default_header_parameters = {'Content-Type': 'application/json; charset=utf-8'}
-
- self._request_queue = Queue()
-
- self.azure_auth = None
-
- self._batch_fetch = False
-
- def verify_file(self, path):
- '''
- :param loader: an ansible.parsing.dataloader.DataLoader object
- :param path: the path to the inventory config file
- :return the contents of the config file
- '''
- if super(InventoryModule, self).verify_file(path):
- if re.match(r'.{0,}azure_rm\.y(a)?ml$', path):
- return True
- # display.debug("azure_rm inventory filename must end with 'azure_rm.yml' or 'azure_rm.yaml'")
- return False
-
- def parse(self, inventory, loader, path, cache=True):
- super(InventoryModule, self).parse(inventory, loader, path)
-
- self._read_config_data(path)
-
- if self.get_option('use_contrib_script_compatible_sanitization'):
- self._sanitize_group_name = self._legacy_script_compatible_group_sanitization
-
- self._batch_fetch = self.get_option('batch_fetch')
-
- self._legacy_hostnames = self.get_option('plain_host_names')
-
- self._filters = self.get_option('exclude_host_filters') + self.get_option('default_host_filters')
-
- try:
- self._credential_setup()
- self._get_hosts()
- except Exception:
- raise
-
- def _credential_setup(self):
- auth_options = dict(
- auth_source=self.get_option('auth_source'),
- profile=self.get_option('profile'),
- subscription_id=self.get_option('subscription_id'),
- client_id=self.get_option('client_id'),
- secret=self.get_option('secret'),
- tenant=self.get_option('tenant'),
- ad_user=self.get_option('ad_user'),
- password=self.get_option('password'),
- cloud_environment=self.get_option('cloud_environment'),
- cert_validation_mode=self.get_option('cert_validation_mode'),
- api_profile=self.get_option('api_profile'),
- adfs_authority_url=self.get_option('adfs_authority_url')
- )
-
- self.azure_auth = AzureRMAuth(**auth_options)
-
- self._clientconfig = AzureRMRestConfiguration(self.azure_auth.azure_credentials, self.azure_auth.subscription_id,
- self.azure_auth._cloud_environment.endpoints.resource_manager)
- self._client = ServiceClient(self._clientconfig.credentials, self._clientconfig)
-
- def _enqueue_get(self, url, api_version, handler, handler_args=None):
- if not handler_args:
- handler_args = {}
- self._request_queue.put_nowait(UrlAction(url=url, api_version=api_version, handler=handler, handler_args=handler_args))
-
- def _enqueue_vm_list(self, rg='*'):
- if not rg or rg == '*':
- url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines'
- else:
- url = '/subscriptions/{subscriptionId}/resourceGroups/{rg}/providers/Microsoft.Compute/virtualMachines'
-
- url = url.format(subscriptionId=self._clientconfig.subscription_id, rg=rg)
- self._enqueue_get(url=url, api_version=self._compute_api_version, handler=self._on_vm_page_response)
-
- def _enqueue_vmss_list(self, rg=None):
- if not rg or rg == '*':
- url = '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachineScaleSets'
- else:
- url = '/subscriptions/{subscriptionId}/resourceGroups/{rg}/providers/Microsoft.Compute/virtualMachineScaleSets'
-
- url = url.format(subscriptionId=self._clientconfig.subscription_id, rg=rg)
- self._enqueue_get(url=url, api_version=self._compute_api_version, handler=self._on_vmss_page_response)
-
- def _get_hosts(self):
- for vm_rg in self.get_option('include_vm_resource_groups'):
- self._enqueue_vm_list(vm_rg)
-
- for vmss_rg in self.get_option('include_vmss_resource_groups'):
- self._enqueue_vmss_list(vmss_rg)
-
- if self._batch_fetch:
- self._process_queue_batch()
- else:
- self._process_queue_serial()
-
- constructable_config_strict = boolean(self.get_option('fail_on_template_errors'))
- constructable_config_compose = self.get_option('hostvar_expressions')
- constructable_config_groups = self.get_option('conditional_groups')
- constructable_config_keyed_groups = self.get_option('keyed_groups')
-
- for h in self._hosts:
- inventory_hostname = self._get_hostname(h)
- if self._filter_host(inventory_hostname, h.hostvars):
- continue
- self.inventory.add_host(inventory_hostname)
- # FUTURE: configurable default IP list? can already do this via hostvar_expressions
- self.inventory.set_variable(inventory_hostname, "ansible_host",
- next(chain(h.hostvars['public_ipv4_addresses'], h.hostvars['private_ipv4_addresses']), None))
- for k, v in iteritems(h.hostvars):
- # FUTURE: configurable hostvar prefix? Makes docs harder...
- self.inventory.set_variable(inventory_hostname, k, v)
-
- # constructable delegation
- self._set_composite_vars(constructable_config_compose, h.hostvars, inventory_hostname, strict=constructable_config_strict)
- self._add_host_to_composed_groups(constructable_config_groups, h.hostvars, inventory_hostname, strict=constructable_config_strict)
- self._add_host_to_keyed_groups(constructable_config_keyed_groups, h.hostvars, inventory_hostname, strict=constructable_config_strict)
-
- # FUTURE: fix underlying inventory stuff to allow us to quickly access known groupvars from reconciled host
- def _filter_host(self, inventory_hostname, hostvars):
- self.templar.available_variables = hostvars
-
- for condition in self._filters:
- # FUTURE: should warn/fail if conditional doesn't return True or False
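- # For example (illustrative), the default filter powerstate != "running" becomes the
- # template {% if powerstate != "running" %} True {% else %} False {% endif %},
- # which is then rendered against the host's variables.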
- conditional = "{{% if {0} %}} True {{% else %}} False {{% endif %}}".format(condition)
- try:
- if boolean(self.templar.template(conditional)):
- return True
- except Exception as e:
- if boolean(self.get_option('fail_on_template_errors')):
- raise AnsibleParserError("Error evaluating filter condition '{0}' for host {1}: {2}".format(condition, inventory_hostname, to_native(e)))
- continue
-
- return False
-
- def _get_hostname(self, host):
- # FUTURE: configurable hostname sources
- return host.default_inventory_hostname
-
- def _process_queue_serial(self):
- try:
- while True:
- item = self._request_queue.get_nowait()
- resp = self.send_request(item.url, item.api_version)
- item.handler(resp, **item.handler_args)
- except Empty:
- pass
-
- def _on_vm_page_response(self, response, vmss=None):
- next_link = response.get('nextLink')
-
- if next_link:
- self._enqueue_get(url=next_link, api_version=self._compute_api_version, handler=self._on_vm_page_response)
-
- if 'value' in response:
- for h in response['value']:
- # FUTURE: add direct VM filtering by tag here (performance optimization)?
- self._hosts.append(AzureHost(h, self, vmss=vmss, legacy_name=self._legacy_hostnames))
-
- def _on_vmss_page_response(self, response):
- next_link = response.get('nextLink')
-
- if next_link:
- self._enqueue_get(url=next_link, api_version=self._compute_api_version, handler=self._on_vmss_page_response)
-
- # FUTURE: add direct VMSS filtering by tag here (performance optimization)?
- for vmss in response['value']:
- url = '{0}/virtualMachines'.format(vmss['id'])
- # VMSS instances look close enough to regular VMs that we can share the handler impl...
- self._enqueue_get(url=url, api_version=self._compute_api_version, handler=self._on_vm_page_response, handler_args=dict(vmss=vmss))
-
-# use the undocumented /batch endpoint to bulk-send requests in a single round-trip (batched 100 at a time below)
- #
- def _process_queue_batch(self):
- while True:
- batch_requests = []
- batch_item_index = 0
- batch_response_handlers = dict()
- try:
- while batch_item_index < 100:
- item = self._request_queue.get_nowait()
-
- name = str(uuid.uuid4())
- query_parameters = {'api-version': item.api_version}
- req = self._client.get(item.url, query_parameters)
- batch_requests.append(dict(httpMethod="GET", url=req.url, name=name))
- batch_response_handlers[name] = item
- batch_item_index += 1
- except Empty:
- pass
-
- if not batch_requests:
- break
-
- batch_resp = self._send_batch(batch_requests)
-
- key_name = None
- if 'responses' in batch_resp:
- key_name = 'responses'
- elif 'value' in batch_resp:
- key_name = 'value'
- else:
- raise AnsibleError("didn't find expected key responses/value in batch response")
-
- for idx, r in enumerate(batch_resp[key_name]):
- status_code = r.get('httpStatusCode')
- returned_name = r['name']
- result = batch_response_handlers[returned_name]
- if status_code != 200:
- # FUTURE: error-tolerant operation mode (eg, permissions)
- raise AnsibleError("a batched request failed with status code {0}, url {1}".format(status_code, result.url))
- # FUTURE: store/handle errors from individual handlers
- result.handler(r['content'], **result.handler_args)
-
- def _send_batch(self, batched_requests):
- url = '/batch'
- query_parameters = {'api-version': '2015-11-01'}
-
- body_obj = dict(requests=batched_requests)
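- # Resulting body shape (for reference):
- # {"requests": [{"httpMethod": "GET", "url": "<request url>", "name": "<uuid>"}, ...]}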
-
- body_content = self._serializer.body(body_obj, 'object')
-
- header = {'x-ms-client-request-id': str(uuid.uuid4())}
- header.update(self._default_header_parameters)
-
- request = self._client.post(url, query_parameters)
- initial_response = self._client.send(request, header, body_content)
-
- # FUTURE: configurable timeout?
- poller = ARMPolling(timeout=2)
- poller.initialize(client=self._client,
- initial_response=initial_response,
- deserialization_callback=lambda r: self._deserializer('object', r))
-
- poller.run()
-
- return poller.resource()
-
- def send_request(self, url, api_version):
- query_parameters = {'api-version': api_version}
- req = self._client.get(url, query_parameters)
- resp = self._client.send(req, self._default_header_parameters, stream=False)
-
- resp.raise_for_status()
- content = resp.content
-
- return json.loads(content)
-
- @staticmethod
- def _legacy_script_compatible_group_sanitization(name):
-
- # note that while this mirrors what the script used to do, it has many issues with unicode and usability in python
- regex = re.compile(r"[^A-Za-z0-9\_\-]")
-
- return regex.sub('_', name)
-
-# VM list (all, N resource groups): VM -> InstanceView, N NICs, N PublicIPAddress)
-# VMSS VMs (all SS, N specific SS, N resource groups?): SS -> VM -> InstanceView, N NICs, N PublicIPAddress)
-
-
-class AzureHost(object):
- _powerstate_regex = re.compile('^PowerState/(?P<powerstate>.+)$')
-
- def __init__(self, vm_model, inventory_client, vmss=None, legacy_name=False):
- self._inventory_client = inventory_client
- self._vm_model = vm_model
- self._vmss = vmss
-
- self._instanceview = None
-
- self._powerstate = "unknown"
- self.nics = []
-
- if legacy_name:
- self.default_inventory_hostname = vm_model['name']
- else:
- # Azure often doesn't provide a globally-unique name, so use resource name + a chunk of ID hash
- self.default_inventory_hostname = '{0}_{1}'.format(vm_model['name'], hashlib.sha1(to_bytes(vm_model['id'])).hexdigest()[0:4])
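- # e.g. (illustrative) a VM named 'myvm' becomes something like 'myvm_f3a1'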
-
- self._hostvars = {}
-
- inventory_client._enqueue_get(url="{0}/instanceView".format(vm_model['id']),
- api_version=self._inventory_client._compute_api_version,
- handler=self._on_instanceview_response)
-
- nic_refs = vm_model['properties']['networkProfile']['networkInterfaces']
- for nic in nic_refs:
- # single-nic instances don't set primary, so figure it out...
- is_primary = nic.get('properties', {}).get('primary', len(nic_refs) == 1)
- inventory_client._enqueue_get(url=nic['id'], api_version=self._inventory_client._network_api_version,
- handler=self._on_nic_response,
- handler_args=dict(is_primary=is_primary))
-
- @property
- def hostvars(self):
- if self._hostvars != {}:
- return self._hostvars
-
- system = "unknown"
- if 'osProfile' in self._vm_model['properties']:
- if 'linuxConfiguration' in self._vm_model['properties']['osProfile']:
- system = 'linux'
- if 'windowsConfiguration' in self._vm_model['properties']['osProfile']:
- system = 'windows'
-
- new_hostvars = dict(
- public_ipv4_addresses=[],
- public_dns_hostnames=[],
- private_ipv4_addresses=[],
- id=self._vm_model['id'],
- location=self._vm_model['location'],
- name=self._vm_model['name'],
- powerstate=self._powerstate,
- provisioning_state=self._vm_model['properties']['provisioningState'].lower(),
- tags=self._vm_model.get('tags', {}),
- resource_type=self._vm_model.get('type', "unknown"),
- vmid=self._vm_model['properties']['vmId'],
- os_profile=dict(
- system=system,
- ),
- vmss=dict(
- id=self._vmss['id'],
- name=self._vmss['name'],
- ) if self._vmss else {},
- virtual_machine_size=self._vm_model['properties']['hardwareProfile']['vmSize'] if self._vm_model['properties'].get('hardwareProfile') else None,
- plan=self._vm_model['properties']['plan']['name'] if self._vm_model['properties'].get('plan') else None,
- resource_group=parse_resource_id(self._vm_model['id']).get('resource_group').lower()
- )
-
- # set nic-related values from the primary NIC first
- for nic in sorted(self.nics, key=lambda n: n.is_primary, reverse=True):
- # and from the primary IP config per NIC first
- for ipc in sorted(nic._nic_model['properties']['ipConfigurations'], key=lambda i: i['properties'].get('primary', False), reverse=True):
- private_ip = ipc['properties'].get('privateIPAddress')
- if private_ip:
- new_hostvars['private_ipv4_addresses'].append(private_ip)
- pip_id = ipc['properties'].get('publicIPAddress', {}).get('id')
- if pip_id:
- new_hostvars['public_ip_id'] = pip_id
-
- pip = nic.public_ips[pip_id]
- new_hostvars['public_ip_name'] = pip._pip_model['name']
- new_hostvars['public_ipv4_addresses'].append(pip._pip_model['properties'].get('ipAddress', None))
- pip_fqdn = pip._pip_model['properties'].get('dnsSettings', {}).get('fqdn')
- if pip_fqdn:
- new_hostvars['public_dns_hostnames'].append(pip_fqdn)
-
- new_hostvars['mac_address'] = nic._nic_model['properties'].get('macAddress')
- new_hostvars['network_interface'] = nic._nic_model['name']
- new_hostvars['network_interface_id'] = nic._nic_model['id']
- new_hostvars['security_group_id'] = nic._nic_model['properties']['networkSecurityGroup']['id'] \
- if nic._nic_model['properties'].get('networkSecurityGroup') else None
- new_hostvars['security_group'] = parse_resource_id(new_hostvars['security_group_id'])['resource_name'] \
- if nic._nic_model['properties'].get('networkSecurityGroup') else None
-
- # set image and os_disk
- new_hostvars['image'] = {}
- new_hostvars['os_disk'] = {}
- storageProfile = self._vm_model['properties'].get('storageProfile')
- if storageProfile:
- imageReference = storageProfile.get('imageReference')
- if imageReference:
- if imageReference.get('publisher'):
- new_hostvars['image'] = dict(
- sku=imageReference.get('sku'),
- publisher=imageReference.get('publisher'),
- version=imageReference.get('version'),
- offer=imageReference.get('offer')
- )
- elif imageReference.get('id'):
- new_hostvars['image'] = dict(
- id=imageReference.get('id')
- )
-
- osDisk = storageProfile.get('osDisk')
- new_hostvars['os_disk'] = dict(
- name=osDisk.get('name'),
- operating_system_type=osDisk.get('osType').lower() if osDisk.get('osType') else None
- )
-
- self._hostvars = new_hostvars
-
- return self._hostvars
-
- def _on_instanceview_response(self, vm_instanceview_model):
- self._instanceview = vm_instanceview_model
- self._powerstate = next((self._powerstate_regex.match(s.get('code', '')).group('powerstate')
- for s in vm_instanceview_model.get('statuses', []) if self._powerstate_regex.match(s.get('code', ''))), 'unknown')
-
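
The instance view handler above extracts the power state from status codes of the form 'PowerState/running' (or 'PowerState/deallocated', and so on), falling back to 'unknown' when no such status is present. The same extraction as a standalone function:

    import re

    _powerstate_regex = re.compile('^PowerState/(?P<powerstate>.+)$')

    def extract_powerstate(statuses):
        for s in statuses:
            match = _powerstate_regex.match(s.get('code', ''))
            if match:
                return match.group('powerstate')
        return 'unknown'

    # extract_powerstate([{'code': 'ProvisioningState/succeeded'},
    #                     {'code': 'PowerState/running'}])  -> 'running'
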
- def _on_nic_response(self, nic_model, is_primary=False):
- nic = AzureNic(nic_model=nic_model, inventory_client=self._inventory_client, is_primary=is_primary)
- self.nics.append(nic)
-
-
-class AzureNic(object):
- def __init__(self, nic_model, inventory_client, is_primary=False):
- self._nic_model = nic_model
- self.is_primary = is_primary
- self._inventory_client = inventory_client
-
- self.public_ips = {}
-
- if nic_model.get('properties', {}).get('ipConfigurations'):
- for ipc in nic_model['properties']['ipConfigurations']:
- pip = ipc['properties'].get('publicIPAddress')
- if pip:
- self._inventory_client._enqueue_get(url=pip['id'], api_version=self._inventory_client._network_api_version, handler=self._on_pip_response)
-
- def _on_pip_response(self, pip_model):
- self.public_ips[pip_model['id']] = AzurePip(pip_model)
-
-
-class AzurePip(object):
- def __init__(self, pip_model):
- self._pip_model = pip_model
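
AzureNic and AzurePip complete the fan-out: a NIC's ipConfigurations may reference a publicIPAddress by id only, so a follow-up GET is queued for every such reference and the resolved model is cached in nic.public_ips keyed by that id, which is how the hostvars code above maps a pip_id straight to its name, ipAddress and fqdn. A compact sketch of that id-keyed resolution, written synchronously for clarity (the plugin batches these GETs instead; get_resource is a stand-in):

    def collect_public_ip_ids(nic_model):
        # Gather the publicIPAddress ids referenced by a NIC's ipConfigurations.
        ids = []
        for ipc in nic_model.get('properties', {}).get('ipConfigurations', []):
            pip = ipc['properties'].get('publicIPAddress')
            if pip:
                ids.append(pip['id'])
        return ids

    def resolve_public_ips(nic_model, get_resource):
        # get_resource(resource_id) stands in for the plugin's queued GET.
        return {pip_id: get_resource(pip_id)
                for pip_id in collect_public_ip_ids(nic_model)}
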
diff --git a/test/integration/targets/azure_rm_acs/aliases b/test/integration/targets/azure_rm_acs/aliases
deleted file mode 100644
index 70048be663..0000000000
--- a/test/integration/targets/azure_rm_acs/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/azure
-destructive
-unsupported
diff --git a/test/integration/targets/azure_rm_acs/meta/main.yml b/test/integration/targets/azure_rm_acs/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_acs/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_acs/tasks/main.yml b/test/integration/targets/azure_rm_acs/tasks/main.yml
deleted file mode 100644
index a35871c09b..0000000000
--- a/test/integration/targets/azure_rm_acs/tasks/main.yml
+++ /dev/null
@@ -1,149 +0,0 @@
- - name: Create an ACS instance - DCOS
- azure_rm_acs:
- name: "acsdcos{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- resource_group: "{{ resource_group }}"
- location: eastus
- orchestration_platform: DCOS
- master_profile:
- - count: 1
- dns_prefix: "acsdcos{{ resource_group | hash('md5') | truncate(10, True, '') }}"
- vm_size: Standard_A0
- linux_profile:
- - admin_username: azureuser
- ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+io238wdhjkasndq238e2/983289dasjnasey823/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+928dfsjsejk298r/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+dsajda82e78sdja/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt juliens@msft.com
- agent_pool_profiles:
- - name: default
- count: 1
- dns_prefix: "acsdcos{{ resource_group | hash('md5') | truncate(12, True, '') }}"
- vm_size: Standard_A0
- diagnostics_profile: false
- register: output
-
- - name: Assert the ACS instance is created successfully
- assert:
- that:
- - output.changed
- - output.state.provisioning_state == 'Succeeded'
-
- - name: Scale the ACS instance from 1 to 2 - DCOS
- azure_rm_acs:
- name: "acsdcos{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- resource_group: "{{ resource_group }}"
- location: eastus
- orchestration_platform: DCOS
- master_profile:
- - count: 1
- dns_prefix: "acsdcos{{ resource_group | hash('md5') | truncate(10, True, '') }}"
- vm_size: Standard_A0
- linux_profile:
- - admin_username: azureuser
- ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+io238wdhjkasndq238e2/983289dasjnasey823/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+928dfsjsejk298r/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+dsajda82e78sdja/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt juliens@msft.com
- agent_pool_profiles:
- - name: default
- count: 2
- dns_prefix: "acsdcos{{ resource_group | hash('md5') | truncate(12, True, '') }}"
- vm_size: Standard_A0
- diagnostics_profile: false
- register: output
-
- - name: Assert the ACS instance is scaled successfully
- assert:
- that:
- - output.changed
- - output.state.agent_pool_profiles[0].count == 2
-
- - name: Delete the ACS instance - DCOS
- azure_rm_acs:
- name: "acsdcos{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- resource_group: "{{ resource_group }}"
- location: eastus
- orchestration_platform: DCOS
- state: absent
- master_profile:
- - count: 1
- dns_prefix: "acsdcos{{ resource_group | hash('md5') | truncate(10, True, '') }}"
- vm_size: Standard_A0
- linux_profile:
- - admin_username: azureuser
- ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+io238wdhjkasndq238e2/983289dasjnasey823/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+928dfsjsejk298r/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+dsajda82e78sdja/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt juliens@msft.com
- agent_pool_profiles:
- - name: default
- count: 2
- dns_prefix: "acsdcos{{ resource_group | hash('md5') | truncate(12, True, '') }}"
- vm_size: Standard_A0
- diagnostics_profile: false
-
- - name: Create an ACS instance - Swarm
- azure_rm_acs:
- name: "acssw{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- resource_group: "{{ resource_group }}"
- location: eastus
- orchestration_platform: Swarm
- master_profile:
- - count: 1
- dns_prefix: "acssw{{ resource_group | hash('md5') | truncate(10, True, '') }}"
- vm_size: Standard_A0
- linux_profile:
- - admin_username: azureuser
- ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+io238wdhjkasndq238e2/983289dasjnasey823/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+928dfsjsejk298r/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+dsajda82e78sdja/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt juliens@msft.com
- agent_pool_profiles:
- - name: default
- count: 1
- dns_prefix: "acssw{{ resource_group | hash('md5') | truncate(12, True, '') }}"
- vm_size: Standard_A0
- diagnostics_profile: false
- register: output
-
- - name: Assert the ACS instance is created successfully
- assert:
- that:
- - output.changed
- - output.state.provisioning_state == 'Succeeded'
-
- - name: Scale the ACS instance from 1 to 2 - Swarm
- azure_rm_acs:
- name: "acssw{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- resource_group: "{{ resource_group }}"
- location: eastus
- orchestration_platform: Swarm
- master_profile:
- - count: 1
- dns_prefix: "acssw{{ resource_group | hash('md5') | truncate(10, True, '') }}"
- vm_size: Standard_A0
- linux_profile:
- - admin_username: azureuser
- ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+io238wdhjkasndq238e2/983289dasjnasey823/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+928dfsjsejk298r/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+dsajda82e78sdja/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt juliens@msft.com
- agent_pool_profiles:
- - name: default
- count: 2
- dns_prefix: "acssw{{ resource_group | hash('md5') | truncate(12, True, '') }}"
- vm_size: Standard_A0
- diagnostics_profile: false
- register: output
-
- - name: Assert the ACS instance is scaled successfully
- assert:
- that:
- - output.changed
- - output.state.agent_pool_profiles[0].count == 2
-
- - name: Delete the ACS instance - Swarm
- azure_rm_acs:
- name: "acssw{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- resource_group: "{{ resource_group }}"
- location: eastus
- orchestration_platform: Swarm
- state: absent
- master_profile:
- - count: 1
- dns_prefix: "acssw{{ resource_group | hash('md5') | truncate(10, True, '') }}"
- vm_size: Standard_A0
- linux_profile:
- - admin_username: azureuser
- ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+io238wdhjkasndq238e2/983289dasjnasey823/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+928dfsjsejk298r/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+dsajda82e78sdja/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt juliens@msft.com
- agent_pool_profiles:
- - name: default
- count: 2
- dns_prefix: "acssw{{ resource_group | hash('md5') | truncate(12, True, '') }}"
- vm_size: Standard_A0
- diagnostics_profile: false
\ No newline at end of file
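
These ACS tasks derive every cluster name and DNS prefix from the resource group via the hash('md5') and truncate filters, so reruns against the same resource group reuse the same names while different groups do not collide. The Jinja expression {{ resource_group | hash('md5') | truncate(7, True, '') }} is roughly equivalent to this Python:

    import hashlib

    def name_prefix(resource_group, length=7):
        # md5 hexdigest of the resource group name, cut to the first `length`
        # characters with no ellipsis -- what hash('md5') | truncate(length, True, '')
        # produces in the playbook.
        return hashlib.md5(resource_group.encode('utf-8')).hexdigest()[:length]

    # 'acsdcos' + name_prefix('myResourceGroup') -> a stable, collision-resistant
    # cluster name for that resource group
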
diff --git a/test/integration/targets/azure_rm_aks/aliases b/test/integration/targets/azure_rm_aks/aliases
deleted file mode 100644
index e340069a05..0000000000
--- a/test/integration/targets/azure_rm_aks/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/azure
-unsupported
-destructive
\ No newline at end of file
diff --git a/test/integration/targets/azure_rm_aks/meta/main.yml b/test/integration/targets/azure_rm_aks/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_aks/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_aks/tasks/main.yml b/test/integration/targets/azure_rm_aks/tasks/main.yml
deleted file mode 100644
index 082f8ef185..0000000000
--- a/test/integration/targets/azure_rm_aks/tasks/main.yml
+++ /dev/null
@@ -1,213 +0,0 @@
- - set_fact:
- rpfx: "{{ resource_group | hash('md5') | truncate(8, True, '') }}"
-
- - name: Find available k8s version
- azure_rm_aksversion_info:
- location: eastus
- register: versions
-
- - name: Create an AKS instance (check mode)
- azure_rm_aks:
- name: "aks{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- location: eastus
- dns_prefix: "aks{{ rpfx }}"
- kubernetes_version: "{{ versions.azure_aks_versions[0] }}"
- service_principal:
- client_id: "{{ azure_client_id }}"
- client_secret: "{{ azure_secret }}"
- linux_profile:
- admin_username: azureuser
- ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible
- agent_pool_profiles:
- - name: default
- count: 1
- vm_size: Standard_DS1_v2
- enable_rbac: yes
- check_mode: yes
-
- - name: Check there is no AKS created
- azure_rm_aks_info:
- name: "aks{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- register: fact
-
- - name: Assert there is no AKS created
- assert:
- that:
- - "fact.aks | length == 0"
-
- - name: Create an AKS instance
- azure_rm_aks:
- name: "aks{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- location: eastus
- dns_prefix: "aks{{ rpfx }}"
- kubernetes_version: "{{ versions.azure_aks_versions[0] }}"
- service_principal:
- client_id: "{{ azure_client_id }}"
- client_secret: "{{ azure_secret }}"
- linux_profile:
- admin_username: azureuser
- ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible
- agent_pool_profiles:
- - name: default
- count: 1
- vm_size: Standard_DS1_v2
- enable_rbac: yes
- register: output
-
- - name: Assert the AKS instance is created successfully
- assert:
- that:
- - output.changed
- - output.provisioning_state == 'Succeeded'
-
- - name: Get AKS fact
- azure_rm_aks_info:
- name: "aks{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- register: fact
-
- - name: Assert fact returns the created one
- assert:
- that:
- - "fact.aks | length == 1"
- - fact.aks[0].id == output.id
-
- - name: Create an AKS instance (idempotent)
- azure_rm_aks:
- name: "aks{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- location: eastus
- dns_prefix: "aks{{ rpfx }}"
- kubernetes_version: "{{ versions.azure_aks_versions[0] }}"
- service_principal:
- client_id: "{{ azure_client_id }}"
- linux_profile:
- admin_username: azureuser
- ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible
- agent_pool_profiles:
- - name: default
- count: 1
- vm_size: Standard_DS1_v2
- enable_rbac: yes
- register: output
-
- - name: Assert idempotent
- assert:
- that:
- - not output.changed
-
- - name: Get available version
- azure_rm_aksversion_info:
- location: eastus
- version: "{{ versions.azure_aks_versions[0] }}"
- register: version1
-
- - name: Upgrade the AKS instance with addon
- azure_rm_aks:
- name: "aks{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- location: eastus
- dns_prefix: "aks{{ rpfx }}"
- kubernetes_version: "{{ version1.azure_aks_versions[0] }}"
- service_principal:
- client_id: "{{ azure_client_id }}"
- linux_profile:
- admin_username: azureuser
- ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible
- agent_pool_profiles:
- - name: default
- count: 1
- vm_size: Standard_DS1_v2
- addon:
- http_application_routing: {}
- network_profile:
- network_plugin: kubenet
- enable_rbac: yes
- register: output
-
- - name: Assert the AKS instance is upgraded
- assert:
- that:
- - output.changed
- - output.kubernetes_version == version1.azure_aks_versions[0]
- - output.addon.httpApplicationRouting.enabled == True
- - output.agent_pool_profiles[0].count == 1
- - output.network_profile.network_plugin == 'kubenet'
-
- - name: Upgrade the AKS instance with addon (idempotent)
- azure_rm_aks:
- name: "aks{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- location: eastus
- dns_prefix: "aks{{ rpfx }}"
- kubernetes_version: "{{ version1.azure_aks_versions[0] }}"
- service_principal:
- client_id: "{{ azure_client_id }}"
- linux_profile:
- admin_username: azureuser
- ssh_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSPmiqkvDH1/+MDAVDZT8381aYqp73Odz8cnD5hegNhqtXajqtiH0umVg7HybX3wt1HjcrwKJovZURcIbbcDvzdH2bnYbF93T4OLXA0bIfuIp6M86x1iutFtXdpN3TTicINrmSXEE2Ydm51iMu77B08ZERjVaToya2F7vC+egfoPvibf7OLxE336a5tPCywavvNihQjL8sjgpDT5AAScjb3YqK/6VLeQ18Ggt8/ufINsYkb+9/Ji/3OcGFeflnDXq80vPUyF3u4iIylob6RSZenC38cXmQB05tRNxS1B6BXCjMRdy0v4pa7oKM2GA4ADKpNrr0RI9ed+peRFwmsclH test@ansible
- agent_pool_profiles:
- - name: default
- count: 1
- vm_size: Standard_DS1_v2
- addon:
- http_application_routing: {}
- network_profile:
- network_plugin: kubenet
- enable_rbac: yes
- register: output
-
- - assert:
- that:
- - not output.changed
-
- - name: Get AKS fact
- azure_rm_aks_info:
- name: "aks{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- show_kubeconfig: user
- register: fact
-
- - name: Assert fact returns the created one
- assert:
- that:
- - "fact.aks | length == 1"
- - fact.aks[0].kube_config == output.kube_config
-
- - name: Delete the AKS instance
- azure_rm_aks:
- name: "aks{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- state: absent
- register: output
-
- - name: Assert the AKS instance is deleted successfully
- assert:
- that:
- - output.changed
-
- - name: Delete the AKS instance (idempotent)
- azure_rm_aks:
- name: "aks{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- state: absent
- register: output
-
- - name: Assert idempotent
- assert:
- that:
- - not output.changed
-
- - name: Get AKS fact
- azure_rm_aks_info:
- name: "aks{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- register: fact
-
- - name: Assert fact returns empty
- assert:
- that:
- - "fact.aks | length == 0"
diff --git a/test/integration/targets/azure_rm_appgateway/aliases b/test/integration/targets/azure_rm_appgateway/aliases
deleted file mode 100644
index 3b050cbc18..0000000000
--- a/test/integration/targets/azure_rm_appgateway/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/azure
-destructive
-shippable/azure/group6
diff --git a/test/integration/targets/azure_rm_appgateway/files/cert1.txt b/test/integration/targets/azure_rm_appgateway/files/cert1.txt
deleted file mode 100644
index 82a13f4cec..0000000000
--- a/test/integration/targets/azure_rm_appgateway/files/cert1.txt
+++ /dev/null
@@ -1 +0,0 @@
-MIIMAjCCCeqgAwIBAgITLQAAMpnXBx230XCKQgAAAAAymTANBgkqhkiG9w0BAQsFADCBizELMAkGA1UEBhMCVVMxEzARBgNVBAgTCldhc2hpbmd0b24xEDAOBgNVBAcTB1JlZG1vbmQxHjAcBgNVBAoTFU1pY3Jvc29mdCBDb3Jwb3JhdGlvbjEVMBMGA1UECxMMTWljcm9zb2Z0IElUMR4wHAYDVQQDExVNaWNyb3NvZnQgSVQgVExTIENBIDUwHhcNMTcwNzIwMTc0NzA4WhcNMTkwNzEwMTc0NzA4WjAXMRUwEwYDVQQDEwx3d3cuYmluZy5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC6jsg+/7DlIrdgFOcaDlK3RQ9sIgkJsgpj+ZxAbIe3ziyimIxjVlHX87pqgXcNhaYNbCFD0iPm+aUfbv4GDTLR+AIr8eSegqxZ+CBToYM67NhpVYra1KAvY4XgqxorO4FB9IWYJRqhI3SZeZ3lLK5t9XuUMicG8l52nJfpPdXXvBca2wUCq8FHEObG81vJzESA0htLLPTjdUWBQnXPiW5bqzlGHzzv8ISV6jtDLNNa5JRlhSlXho+6pCedhNF7MP4yTaantPvAELLRWX13VhjgoCcRCCu0s8rxW5DuVWl2Pb2iw35MFnNWlcoVwq0AjAfGA+xEba/WLid6qfkQctYjAgMBAAGjggfQMIIHzDAdBgNVHQ4EFgQUCYflhSl4MCAls91+3GztpSmoA3AwCwYDVR0PBAQDAgSwMB8GA1UdIwQYMBaAFAj+JZ906ocEwry7jqg4XzPG0WxlMIGsBgNVHR8EgaQwgaEwgZ6ggZuggZiGS2h0dHA6Ly9tc2NybC5taWNyb3NvZnQuY29tL3BraS9tc2NvcnAvY3JsL01pY3Jvc29mdCUyMElUJTIwVExTJTIwQ0ElMjA1LmNybIZJaHR0cDovL2NybC5taWNyb3NvZnQuY29tL3BraS9tc2NvcnAvY3JsL01pY3Jvc29mdCUyMElUJTIwVExTJTIwQ0ElMjA1LmNybDCBhQYIKwYBBQUHAQEEeTB3MFEGCCsGAQUFBzAChkVodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpL21zY29ycC9NaWNyb3NvZnQlMjBJVCUyMFRMUyUyMENBJTIwNS5jcnQwIgYIKwYBBQUHMAGGFmh0dHA6Ly9vY3NwLm1zb2NzcC5jb20wPgYJKwYBBAGCNxUHBDEwLwYnKwYBBAGCNxUIh9qGdYPu2QGCyYUbgbWeYYX062CBXYTS30KC55N6AgFkAgEQMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATBNBgNVHSAERjBEMEIGCSsGAQQBgjcqATA1MDMGCCsGAQUFBwIBFidodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtpL21zY29ycC9jcHMwJwYJKwYBBAGCNxUKBBowGDAKBggrBgEFBQcDAjAKBggrBgEFBQcDATCCBW0GA1UdEQSCBWQwggVgggx3d3cuYmluZy5jb22CEGRpY3QuYmluZy5jb20uY26CEyoucGxhdGZvcm0uYmluZy5jb22CCiouYmluZy5jb22CCGJpbmcuY29tghZpZW9ubGluZS5taWNyb3NvZnQuY29tghMqLndpbmRvd3NzZWFyY2guY29tghljbi5pZW9ubGluZS5taWNyb3NvZnQuY29tghEqLm9yaWdpbi5iaW5nLmNvbYINKi5tbS5iaW5nLm5ldIIOKi5hcGkuYmluZy5jb22CGGVjbi5kZXYudmlydHVhbGVhcnRoLm5ldIINKi5jbi5iaW5nLm5ldIINKi5jbi5iaW5nLmNvbYIQc3NsLWFwaS5iaW5nLmNvbYIQc3NsLWFwaS5iaW5nLm5ldIIOKi5hcGkuYmluZy5uZXSCDiouYmluZ2FwaXMuY29tgg9iaW5nc2FuZGJveC5jb22CFmZlZWRiYWNrLm1pY3Jvc29mdC5jb22CG2luc2VydG1lZGlhLmJpbmcub2ZmaWNlLm5ldIIOci5iYXQuYmluZy5jb22CECouci5iYXQuYmluZy5jb22CEiouZGljdC5iaW5nLmNvbS5jboIPKi5kaWN0LmJpbmcuY29tgg4qLnNzbC5iaW5nLmNvbYIQKi5hcHBleC5iaW5nLmNvbYIWKi5wbGF0Zm9ybS5jbi5iaW5nLmNvbYINd3AubS5iaW5nLmNvbYIMKi5tLmJpbmcuY29tgg9nbG9iYWwuYmluZy5jb22CEXdpbmRvd3NzZWFyY2guY29tgg5zZWFyY2gubXNuLmNvbYIRKi5iaW5nc2FuZGJveC5jb22CGSouYXBpLnRpbGVzLmRpdHUubGl2ZS5jb22CDyouZGl0dS5saXZlLmNvbYIYKi50MC50aWxlcy5kaXR1LmxpdmUuY29tghgqLnQxLnRpbGVzLmRpdHUubGl2ZS5jb22CGCoudDIudGlsZXMuZGl0dS5saXZlLmNvbYIYKi50My50aWxlcy5kaXR1LmxpdmUuY29tghUqLnRpbGVzLmRpdHUubGl2ZS5jb22CCzNkLmxpdmUuY29tghNhcGkuc2VhcmNoLmxpdmUuY29tghRiZXRhLnNlYXJjaC5saXZlLmNvbYIVY253ZWIuc2VhcmNoLmxpdmUuY29tggxkZXYubGl2ZS5jb22CDWRpdHUubGl2ZS5jb22CEWZhcmVjYXN0LmxpdmUuY29tgg5pbWFnZS5saXZlLmNvbYIPaW1hZ2VzLmxpdmUuY29tghFsb2NhbC5saXZlLmNvbS5hdYIUbG9jYWxzZWFyY2gubGl2ZS5jb22CFGxzNGQuc2VhcmNoLmxpdmUuY29tgg1tYWlsLmxpdmUuY29tghFtYXBpbmRpYS5saXZlLmNvbYIObG9jYWwubGl2ZS5jb22CDW1hcHMubGl2ZS5jb22CEG1hcHMubGl2ZS5jb20uYXWCD21pbmRpYS5saXZlLmNvbYINbmV3cy5saXZlLmNvbYIcb3JpZ2luLmNud2ViLnNlYXJjaC5saXZlLmNvbYIWcHJldmlldy5sb2NhbC5saXZlLmNvbYIPc2VhcmNoLmxpdmUuY29tghJ0ZXN0Lm1hcHMubGl2ZS5jb22CDnZpZGVvLmxpdmUuY29tgg92aWRlb3MubGl2ZS5jb22CFXZpcnR1YWxlYXJ0aC5saXZlLmNvbYIMd2FwLmxpdmUuY29tghJ3ZWJtYXN0ZXIubGl2ZS5jb22CE3dlYm1hc3RlcnMubGl2ZS5jb22CFXd3dy5sb2NhbC5saXZlLmNvbS5hdYIUd3d3Lm1hcHMubGl2ZS5jb20uYXUwDQYJKoZIhvcNAQELBQADggIBADTpW/UWeupk40OP6k4yxihKStswxwqPAfMRmx4XyqmTAawAKRNM+6EZth1BQdPdOplwRTvs69kkmUHJH+ZjYXBezEACWkzEiNUQnzkRWajdSQIz08Ubj/mBD6U8xLYD+NXgiB
0xNWabd8aiPsqPaj6I3qkNw4JvtgtHZQG1zlwC5/Lu6yV3DM3sKpQMyBmOnX6nVUiS0MTOzLgZOQzRk07nO7EXWGcKTmDBjE8cqv5IA/jQ6gtaxCI5pDxfXK4ct7oQyoChfxOXcEDKMmMndFmg9ch5c4an/FRM2cgzDfjR01A71LNUpLUdOjNV0T+ZEStqEpdyDFfjrHGDtzLyqEz3iyvvQFyjmlGh6OtZXwjCPpnVSrKCmfJKio0kUxyq+6t5tZAQbPVgFKiMrVnU+sgvmNVip1toijyz8vMVCkwJ2G++7xjJukoELMxZ50W4/SAMZLy1Asx02NBwYCu9+CTQPVnmPe7rmxhlQRBOfDNa1+5jwRHY64YudEzKhWR1uqS3ABd/fk+TL86yuNYGAgxnOm1FtOGieRgViV3+NzC+bDbuUOtmbD/GvDGmRwJRcCTHL7jBmkHePh2ABY93NE/IbkaDP6l1Kw98AfqkzSUxhqHXuThe7KIoX9/0zv4AA1WZFis1QvAG7dpl9eio6vCdC/73HvBAlqRL+7Mb1uu0
diff --git a/test/integration/targets/azure_rm_appgateway/files/cert2.txt b/test/integration/targets/azure_rm_appgateway/files/cert2.txt
deleted file mode 100644
index a539dbcaf3..0000000000
--- a/test/integration/targets/azure_rm_appgateway/files/cert2.txt
+++ /dev/null
@@ -1 +0,0 @@
-MIIKsQIBAzCCCm0GCSqGSIb3DQEHAaCCCl4EggpaMIIKVjCCBg8GCSqGSIb3DQEHAaCCBgAEggX8MIIF+DCCBfQGCyqGSIb3DQEMCgECoIIE/jCCBPowHAYKKoZIhvcNAQwBAzAOBAj37r+wRsc6/gICB9AEggTY1V5HNscO+2bZb7JSMme1ljERe7DCiRE2cJsPKlwW2/NEiSqAUX7gXKK0ISr6Dto71rFdza0uPwbcQoPN1QwkJHSAkxWg4OcAx2kf7077tlHhf5rzxTn5V3wXM0Q3h6NsDjSqSHjYVISIiXItUzlDaBpMY/NUFNCTyJR5I91MWsljrJ/bQaAIo57HJR9nzjY5DaBA9P3bAhmX5LJRGsJWoCEaGeeVQ3Yn6yD06ordiJnf6dNxqQGN+o2x54gqfmw+RnoC2f8VAsTIfb3fwJPKdg2JiJIa6Ms2Sc8VR7VGmZt34qZwTPBrzeqJjtIMT41bBae46lmma8ypYwErqzOYSrHqXPXzaxlloYy81HYWAsJTWyBxTsVBcLom5m9ru79+SKG35xY1wSkzZmWMNFfVRFCJy/X+h2ErrGYjogCHYaIUmiosvUccwRUXGU083ul9iTcz/Dl79VBz63OFX/CnZMDTQ8ugbqpvW78pAnBU0r8MUubHciD1sJG2zmMlxCAzan6BLm9OMyhTNIbzYOjQQw99MQQys/ZeyNLqTFHTeGRfU2ewqgHjbH2PYCQfjipXSmdmsSsGxlLA9AOtwAk3QKJ77P03HRGOeXmy/I4iIHuIQuaQcjfprNR2fI36dftDo7U4gvRQHkiti+zNVqpi3/hIc2k7O8bCcMeSvfIlUvWIPUrUceZmpVPpLdcFcQbN9+1nZwiFYydOhrPnlp40rSO3RM08EmQUfRYt8fwRFcoWBX3b411vOqZVGeMfMtThMYI53R4Cmh5tUp93FslHNmIfnuewhHfIm+vtCicLcW6TaC2l4EqmNf0flK5m5nANotCfqj87MPsB83qPwol/91BTKaxuH2hKrZDgU1ibPE8NhzBinp2ANi0BHK3Sl0CsC2MPyZpFY+4MWvk/SI9ex4VsKYKmhubOFkhDLLBZH0UEmUdNTH4Gd76GsDnfI9arR2ctM9ecTPeu74hKiHlNZhc4U3TX20FBeqF5tZYnfCLRhvdiNM9AlwEKqqQEe0W7PrALcNVdjhJl0X9+0Br28E3RKZQRITWa10Vjmh0WcYrzEQ3/qEZYbqVpHMp+kdrHxB65v0zlGxjdwyKzafLzqYXmaHOyVlFnkayNaAkVVxOCzNrxB9HfhjvhjWafeMvA0p7O9CxTD2xPEhUaHQ5j7L8F0alfMYcg73SdGHAcY6AV8+eh0jqs3IF68cquXOl5Bm8uYKRjtgl9nY6hYc0lRDdtFHZo8ayNDr0cltNU7XZTaCKVNSDTRn92rTNJY0E3PD5HSKcRi58WJrIgEDGasyleRkRlGTY7512Qut0rg7m1Eyp6MK+sNmSSA7cR70pH7I1dwy4VrJMODdMH11y1QJF2EQWQdN00Js54tjVgTIO3btb5N7jhNYpRedv0a4UZ8TdDI4ZMCMf3SdP3xbQ06M1pFrS8WQzwp3KTk8vmnseJL84n0hC8KqWmGmTWHTa9dwmopeM6Xh/Jm1pkrgrloxqfSlscGEJE0plAnk1mLx29FxswfZ6a7pNKg7CydK4SiDkqM+pWukPbgKODqYPUvS0nk3RGGXvZSIzTbvm77tF+MqXOb6Rn+IflIk4yZsjIBQA0I/bQ78YDzXUVsrtAi9waRoCZs+L48NUy4zpKn25FMqkrziVn+TGB4jANBgkrBgEEAYI3EQIxADATBgkqhkiG9w0BCRUxBgQEAQAAADBdBgkqhkiG9w0BCRQxUB5OAHQAZQAtAGMANwBmADEAYwBhADYAMQAtADQAOAA1ADQALQA0ADgAZQBmAC0AYQAwADgANQAtAGQANABhADIAZgA1AGYAOAAyADcAZAAxMF0GCSsGAQQBgjcRATFQHk4ATQBpAGMAcgBvAHMAbwBmAHQAIABTAHQAcgBvAG4AZwAgAEMAcgB5AHAAdABvAGcAcgBhAHAAaABpAGMAIABQAHIAbwB2AGkAZABlAHIwggQ/BgkqhkiG9w0BBwagggQwMIIELAIBADCCBCUGCSqGSIb3DQEHATAcBgoqhkiG9w0BDAEDMA4ECFcAfrkm3ibUAgIH0ICCA/hlBog8GY9GCpucTwAxBGa0cOGQ29EK0xfrmY/Dv59IeJhRr47Mvl1XNk5PIInb64RsOVr00jrJAbfgLLKMBowcQLT7k6jGbGBdOzC57x9DNP0VuHsIIym0Z+SpJgEWBbtdBTGzgNw/YoXYxT4Rtka9ScSyCFjmmCzXz7bGLqC7yrGb7BzigQ9y4u4bg0pf75pERzN8rJM29Ob2IydkgARfpmbNKjdMCtD6dI7tafG2lQfUX6sgQY+Sy5HTz3ansN8X1yv2WQTu8Drxf2ce55v4WrFbPTTND94ubgDt7jvbCe1DuNP1DAYmQ5pbW0GGqF1x2csK5WWD7J8FD08VaQFM8y8pGIUeUkN4rYU3eTdTAQe+ec2hOr9QZn1Sb/p5u4KqIMn4MSCQ8EU0gXa2JETdUjXPr/5JFZTidJYagRyMIkYnwg9uusikctulaBsHMBKMYQ0Z19CEbcd2phdoxWTrtp7kwwjnu64zPgE6ALe9yJOT8AFEB6H1c16Z+aPGj9hbhkh6tcdGWUvzDYq08wjKjP3nA78StIisUmeZPfAXJUquPzRZr0pmcwYWfyP54TdC2BvPlLW/QXVV44IGxUdLuI6mz4p+O2+xKu9QMFwdcpij2ZK4uMrBLDo7ZoTQ4rBRnn471AMUKgeP0D5tbl8PygUU1RqHv34ok3fwx0WglzdMQJyt53PiPWW4lipwUtUfd0eD8CXoMccf8XJmugVUBCD1wQsyCW6RrR9RX8HXVBrm5O2HKfJcQYznl3qHqXb6ofvbOQ3S+v0ALN+sma8Tn6JceVTAOH+UuMdcu0FIDYnpmrvMecnJ2kbs1Y35mj4rSJyP5PGLg+ygb9VlBPwCCem/jHL+YivN38+0oWqfn2slyI4FNKX+5U8M6xpiEaq6McKwKZC1d51A4dUdMAkO2d1Z6rVjqhKeqE6HWD9A0cyPBFZpNQskUfNDW8qILLEfEjhBi+s1LkHzKDykCN/ReFfRiQS84DekoC59cymM8Hs1geMWCMFWfut4HTd7ItYaiJz5qpYVY4U/8myhyWnrktjLjQ6OkdM9bBDIpRHj95MYEC26NlWQZwjk+yynPTZf0w5p2Ok6Dq2shJFykuJ1VkelgvsPe8qMa55Wp11dpREIFzXouPXNP/vFpziZcl1OKTd7Dwa+ruQFRfsoZGzupsBOYxrmYqr6kOm5mzMW0HAlsWwl5mY2aSQMvXBE6k32xTkPIlIp763Ee4m6cmx4+SDcO5D+a9t05QY4JmssL+x3T9qsbXSXDPEsg0cfVvuQYy9AYkIFOes4G45IagRAvhQQj9bEh8kTvp8CFDDtIrbWjX50zreb51
VcAcEkIOLyROtIdLem0zA7MB8wBwYFKw4DAhoEFC0i4I5iwNYQug0vTVS0JC/Qm+/NBBTsUM0D9QxIZYUi+qlDy14sOcEaUwICB9A=
diff --git a/test/integration/targets/azure_rm_appgateway/meta/main.yml b/test/integration/targets/azure_rm_appgateway/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_appgateway/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_appgateway/tasks/main.yml b/test/integration/targets/azure_rm_appgateway/tasks/main.yml
deleted file mode 100644
index ca52c7b036..0000000000
--- a/test/integration/targets/azure_rm_appgateway/tasks/main.yml
+++ /dev/null
@@ -1,401 +0,0 @@
-- name: Prepare random number
- set_fact:
- rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-- name: Create a virtual network
- azure_rm_virtualnetwork:
- name: vnet{{ rpfx }}
- resource_group: "{{ resource_group }}"
- address_prefixes_cidr:
- - 10.1.0.0/16
- - 172.100.0.0/16
- dns_servers:
- - 127.0.0.1
- - 127.0.0.2
-- name: Create a subnet
- azure_rm_subnet:
- name: subnet{{ rpfx }}
- virtual_network_name: vnet{{ rpfx }}
- resource_group: "{{ resource_group }}"
- address_prefix_cidr: 10.1.0.0/24
- register: subnet_output
-
-- name: Create instance of Application Gateway -- check mode
- azure_rm_appgateway:
- resource_group: "{{ resource_group }}"
- name: "appgateway{{ rpfx }}"
- sku:
- name: standard_small
- tier: standard
- capacity: 2
- ssl_policy:
- policy_type: predefined
- policy_name: ssl_policy20150501
- disabled_ssl_protocols:
- - tls_v1_0
- cipher_suites:
- - tls_ecdhe_ecdsa_with_aes_256_gcm_sha384
- authentication_certificates:
- - name: cert1
- data: "{{ lookup('file', 'cert1.txt') }}"
- ssl_certificates:
- - name: cert2
- password: your-password
- data: "{{ lookup('file', 'cert2.txt') }}"
- gateway_ip_configurations:
- - subnet:
- id: "{{ subnet_output.state.id }}"
- name: app_gateway_ip_config
- frontend_ip_configurations:
- - subnet:
- id: "{{ subnet_output.state.id }}"
- name: sample_gateway_frontend_ip_config
- frontend_ports:
- - port: 90
- name: ag_frontend_port
- - port: 80
- name: http_frontend_port
- backend_address_pools:
- - backend_addresses:
- - ip_address: 10.0.0.4
- name: test_backend_address_pool
- probes:
- - name: custom_probe
- protocol: http
- host: 10.0.0.4
- path: /healthz
- interval: 30
- timeout: 30
- unhealthy_threshold: 3
- backend_http_settings_collection:
- - port: 80
- protocol: http
- cookie_based_affinity: enabled
- probe: custom_probe
- name: sample_appgateway_http_settings
- http_listeners:
- - frontend_ip_configuration: sample_gateway_frontend_ip_config
- frontend_port: ag_frontend_port
- protocol: https
- ssl_certificate: cert2
- name: sample_http_listener
- - frontend_ip_configuration: sample_gateway_frontend_ip_config
- frontend_port: http_frontend_port
- protocol: http
- name: http_listener
- request_routing_rules:
- - rule_type: basic
- backend_address_pool: test_backend_address_pool
- backend_http_settings: sample_appgateway_http_settings
- http_listener: sample_http_listener
- name: rule1
- - rule_type: basic
- http_listener: http_listener
- redirect_configuration: redirect_site_to_https
- name: http_redirect_rule
- redirect_configurations:
- - redirect_type: permanent
- target_listener: sample_http_listener
- include_path: true
- include_query_string: true
- name: redirect_site_to_https
- check_mode: yes
- register: output
-- name: Assert the resource instance would be created
- assert:
- that:
- - output.changed
-
-- name: Dump the Application Gateway output
- debug:
- var: output
-
-- name: Create instance of Application Gateway
- azure_rm_appgateway:
- resource_group: "{{ resource_group }}"
- name: "appgateway{{ rpfx }}"
- sku:
- name: standard_small
- tier: standard
- capacity: 2
- ssl_policy:
- policy_type: predefined
- policy_name: ssl_policy20150501
- disabled_ssl_protocols:
- - tls_v1_0
- cipher_suites:
- - tls_ecdhe_ecdsa_with_aes_256_gcm_sha384
- authentication_certificates:
- - name: cert1
- data: "{{ lookup('file', 'cert1.txt') }}"
- ssl_certificates:
- - name: cert2
- password: your-password
- data: "{{ lookup('file', 'cert2.txt') }}"
- gateway_ip_configurations:
- - subnet:
- id: "{{ subnet_output.state.id }}"
- name: app_gateway_ip_config
- frontend_ip_configurations:
- - subnet:
- id: "{{ subnet_output.state.id }}"
- name: sample_gateway_frontend_ip_config
- frontend_ports:
- - port: 90
- name: ag_frontend_port
- - port: 80
- name: http_frontend_port
- backend_address_pools:
- - backend_addresses:
- - ip_address: 10.0.0.4
- name: test_backend_address_pool
- probes:
- - name: custom_probe
- protocol: http
- host: 10.0.0.4
- path: /healthz
- interval: 30
- timeout: 30
- unhealthy_threshold: 3
- backend_http_settings_collection:
- - port: 80
- protocol: http
- cookie_based_affinity: enabled
- probe: custom_probe
- name: sample_appgateway_http_settings
- http_listeners:
- - frontend_ip_configuration: sample_gateway_frontend_ip_config
- frontend_port: ag_frontend_port
- protocol: https
- ssl_certificate: cert2
- name: sample_http_listener
- - frontend_ip_configuration: sample_gateway_frontend_ip_config
- frontend_port: http_frontend_port
- protocol: http
- name: http_listener
- request_routing_rules:
- - rule_type: Basic
- backend_address_pool: test_backend_address_pool
- backend_http_settings: sample_appgateway_http_settings
- http_listener: sample_http_listener
- name: rule1
- - rule_type: Basic
- http_listener: http_listener
- redirect_configuration: redirect_site_to_https
- name: http_redirect_rule
- redirect_configurations:
- - redirect_type: permanent
- target_listener: sample_http_listener
- include_path: true
- include_query_string: true
- name: redirect_site_to_https
- register: output
-- name: Assert the resource instance is created successfully
- assert:
- that:
- - output.changed
-
-- name: Try to update instance of Application Gateway - no change
- azure_rm_appgateway:
- resource_group: "{{ resource_group }}"
- name: "appgateway{{ rpfx }}"
- sku:
- name: standard_small
- tier: standard
- capacity: 2
- ssl_policy:
- policy_type: predefined
- policy_name: ssl_policy20150501
- disabled_ssl_protocols:
- - tls_v1_0
- cipher_suites:
- - tls_ecdhe_ecdsa_with_aes_256_gcm_sha384
- authentication_certificates:
- - name: cert1
- data: "{{ lookup('file', 'cert1.txt') }}"
- ssl_certificates:
- - name: cert2
- password: your-password
- data: "{{ lookup('file', 'cert2.txt') }}"
- gateway_ip_configurations:
- - subnet:
- id: "{{ subnet_output.state.id }}"
- name: app_gateway_ip_config
- frontend_ip_configurations:
- - subnet:
- id: "{{ subnet_output.state.id }}"
- name: sample_gateway_frontend_ip_config
- frontend_ports:
- - port: 90
- name: ag_frontend_port
- - port: 80
- name: http_frontend_port
- backend_address_pools:
- - backend_addresses:
- - ip_address: 10.0.0.4
- name: test_backend_address_pool
- probes:
- - name: custom_probe
- protocol: http
- host: 10.0.0.4
- path: /healthz
- interval: 30
- timeout: 30
- unhealthy_threshold: 3
- backend_http_settings_collection:
- - port: 80
- protocol: http
- cookie_based_affinity: enabled
- probe: custom_probe
- name: sample_appgateway_http_settings
- http_listeners:
- - frontend_ip_configuration: sample_gateway_frontend_ip_config
- frontend_port: ag_frontend_port
- protocol: https
- ssl_certificate: cert2
- name: sample_http_listener
- - frontend_ip_configuration: sample_gateway_frontend_ip_config
- frontend_port: http_frontend_port
- protocol: http
- name: http_listener
- request_routing_rules:
- - rule_type: Basic
- backend_address_pool: test_backend_address_pool
- backend_http_settings: sample_appgateway_http_settings
- http_listener: sample_http_listener
- name: rule1
- - rule_type: Basic
- http_listener: http_listener
- redirect_configuration: redirect_site_to_https
- name: http_redirect_rule
- redirect_configurations:
- - redirect_type: permanent
- target_listener: sample_http_listener
- include_path: true
- include_query_string: true
- name: redirect_site_to_https
- register: output
-- name: Assert the resource instance is unchanged
- assert:
- that:
- - not output.changed
-
-- name: Try to update instance of Application Gateway - single change
- azure_rm_appgateway:
- resource_group: "{{ resource_group }}"
- name: "appgateway{{ rpfx }}"
- sku:
- name: standard_small
- tier: standard
- capacity: 2
- ssl_policy:
- policy_type: predefined
- policy_name: ssl_policy20150501
- disabled_ssl_protocols:
- - tls_v1_0
- cipher_suites:
- - tls_ecdhe_ecdsa_with_aes_256_gcm_sha384
- authentication_certificates:
- - name: cert1
- data: "{{ lookup('file', 'cert1.txt') }}"
- ssl_certificates:
- - name: cert2
- password: your-password
- data: "{{ lookup('file', 'cert2.txt') }}"
- gateway_ip_configurations:
- - subnet:
- id: "{{ subnet_output.state.id }}"
- name: app_gateway_ip_config
- frontend_ip_configurations:
- - subnet:
- id: "{{ subnet_output.state.id }}"
- name: sample_gateway_frontend_ip_config
- frontend_ports:
- - port: 90
- name: ag_frontend_port
- - port: 80
- name: http_frontend_port
- backend_address_pools:
- - backend_addresses:
- - ip_address: 10.0.0.4
- name: test_backend_address_pool
- probes:
- - name: custom_probe
- protocol: http
- host: 10.0.0.4
- path: /healthz
- interval: 30
- timeout: 30
- unhealthy_threshold: 3
- backend_http_settings_collection:
- - port: 81
- protocol: http
- cookie_based_affinity: enabled
- probe: custom_probe
- name: sample_appgateway_http_settings
- http_listeners:
- - frontend_ip_configuration: sample_gateway_frontend_ip_config
- frontend_port: ag_frontend_port
- protocol: https
- ssl_certificate: cert2
- name: sample_http_listener
- - frontend_ip_configuration: sample_gateway_frontend_ip_config
- frontend_port: http_frontend_port
- protocol: http
- name: http_listener
- request_routing_rules:
- - rule_type: Basic
- backend_address_pool: test_backend_address_pool
- backend_http_settings: sample_appgateway_http_settings
- http_listener: sample_http_listener
- name: rule1
- - rule_type: Basic
- http_listener: http_listener
- redirect_configuration: redirect_site_to_https
- name: http_redirect_rule
- redirect_configurations:
- - redirect_type: permanent
- target_listener: sample_http_listener
- include_path: true
- include_query_string: true
- name: redirect_site_to_https
- register: output
-- name: Assert the resource instance is updated
- assert:
- that:
- - output.changed
-
-- name: Delete instance of Application Gateway -- check mode
- azure_rm_appgateway:
- resource_group: "{{ resource_group }}"
- name: "appgateway{{ rpfx }}"
- state: absent
- check_mode: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete instance of Application Gateway
- azure_rm_appgateway:
- resource_group: "{{ resource_group }}"
- name: "appgateway{{ rpfx }}"
- state: absent
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete a nonexistent instance of Application Gateway
- azure_rm_appgateway:
- resource_group: "{{ resource_group }}"
- name: "appgateway{{ rpfx }}"
- state: absent
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
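
The cert1.txt and cert2.txt fixtures referenced through lookup('file', ...) hold base64-encoded certificate material for the authentication_certificates and ssl_certificates data fields. A hedged sketch of how such a blob could be produced from a local certificate file (the file names are placeholders, and for ssl_certificates the PFX password must match the password option in the play):

    import base64

    def cert_file_to_b64(path):
        # Read the raw certificate bytes (e.g. a .cer or .pfx export) and emit
        # the base64 string expected by the data fields / the cert*.txt fixtures.
        with open(path, 'rb') as f:
            return base64.b64encode(f.read()).decode('ascii')

    # with open('cert2.txt', 'w') as out:          # hypothetical paths
    #     out.write(cert_file_to_b64('example.pfx'))
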
diff --git a/test/integration/targets/azure_rm_appserviceplan/aliases b/test/integration/targets/azure_rm_appserviceplan/aliases
deleted file mode 100644
index a6b233ed02..0000000000
--- a/test/integration/targets/azure_rm_appserviceplan/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group4
-unstable
-destructive
diff --git a/test/integration/targets/azure_rm_appserviceplan/meta/main.yml b/test/integration/targets/azure_rm_appserviceplan/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_appserviceplan/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_appserviceplan/tasks/main.yml b/test/integration/targets/azure_rm_appserviceplan/tasks/main.yml
deleted file mode 100644
index f5746232eb..0000000000
--- a/test/integration/targets/azure_rm_appserviceplan/tasks/main.yml
+++ /dev/null
@@ -1,84 +0,0 @@
-- name: Prepare facts
- set_fact:
- linux_plan_resource_group: "{{ resource_group_secondary }}"
- win_plan_name: "{{ (resource_prefix | replace('-','x'))[-8:] }}winplan"
- linux_plan_name: "{{ (resource_prefix | replace('-','x'))[-8:] }}linplan"
-
-- name: create a windows plan
- azure_rm_appserviceplan:
- name: "{{ win_plan_name }}1"
- resource_group: "{{ resource_group }}"
- sku: B1
- register: output
-
-- name: assert app service was created
- assert:
- that:
- - output.changed
- - output.id
-
-- name: create a linux plan
- azure_rm_appserviceplan:
- resource_group: "{{ linux_plan_resource_group }}"
- name: "{{ linux_plan_name }}1"
- sku: S1
- is_linux: true
- number_of_workers: 1
- register: output
-
-- name: assert app service was created
- assert:
- that:
- - output.changed
- - output.id
-
-- name: get app service plan by name
- azure_rm_appserviceplan_info:
- resource_group: "{{ linux_plan_resource_group }}"
- name: "{{ linux_plan_name }}1"
- register: output
-
-- name: assert is_linux is True
- assert:
- that:
- - output.appserviceplans | length == 1
- - output.appserviceplans[0].is_linux == True
-
-- name: create linux app service plan idempotent
- azure_rm_appserviceplan:
- resource_group: "{{ linux_plan_resource_group }}"
- name: "{{ linux_plan_name }}1"
- sku: S1
- is_linux: true
- number_of_workers: 1
- register: output
-
-- name: assert app service was not changed
- assert:
- that: not output.changed
-
-- name: update a windows plan sku
- azure_rm_appserviceplan:
- name: "{{ win_plan_name }}1"
- resource_group: "{{ resource_group }}"
- sku: B2
- register: output
-
-- name: assert app service was updated
- assert:
- that:
- - output.changed
-
-- name: update a linux plan number of workers
- azure_rm_appserviceplan:
- resource_group: "{{ linux_plan_resource_group }}"
- name: "{{ linux_plan_name }}1"
- sku: S1
- is_linux: true
- number_of_workers: 2
- register: output
-
-- name: assert app service was updated
- assert:
- that:
- - output.changed
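
The plan names come from the last eight characters of resource_prefix with hyphens swapped for 'x', keeping them short, valid and unique per test run. The Python equivalent of (resource_prefix | replace('-','x'))[-8:] used in the set_fact above:

    def plan_name(resource_prefix, suffix):
        # replace('-', 'x'), then keep the trailing eight characters, as in the
        # win_plan_name / linux_plan_name facts above.
        return resource_prefix.replace('-', 'x')[-8:] + suffix

    # plan_name('ansible-test-12345678-rg', 'winplan') -> '45678xrgwinplan'
    # (the exact value depends on the prefix; the one here is hypothetical)
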
diff --git a/test/integration/targets/azure_rm_automationaccount/aliases b/test/integration/targets/azure_rm_automationaccount/aliases
deleted file mode 100644
index 04a2c98ae7..0000000000
--- a/test/integration/targets/azure_rm_automationaccount/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group4
-destructive
-azure_rm_automationaccount_facts
diff --git a/test/integration/targets/azure_rm_automationaccount/meta/main.yml b/test/integration/targets/azure_rm_automationaccount/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_automationaccount/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_automationaccount/tasks/main.yml b/test/integration/targets/azure_rm_automationaccount/tasks/main.yml
deleted file mode 100644
index 882693999a..0000000000
--- a/test/integration/targets/azure_rm_automationaccount/tasks/main.yml
+++ /dev/null
@@ -1,88 +0,0 @@
-- name: Prepare random number
- set_fact:
- rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- name: "account{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-- name: Create automation account (check mode)
- azure_rm_automationaccount:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- check_mode: yes
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Create automation account
- azure_rm_automationaccount:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- register: output
-
-- assert:
- that:
- - output.changed
- - output.id
-
-- name: Create automation account (idempotent)
- azure_rm_automationaccount:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- register: output
-
-- assert:
- that:
- - not output.changed
-
-- name: Get automation account
- azure_rm_automationaccount_facts:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- list_statistics: yes
- list_usages: yes
- list_keys: yes
- register: facts
-
-- assert:
- that:
- - facts.automation_accounts | length == 1
- - facts.automation_accounts[0].keys
- - facts.automation_accounts[0].usages
- - facts.automation_accounts[0].statistics
- - facts.automation_accounts[0].state == "Ok"
-
-- name: Delete account (check mode)
- azure_rm_automationaccount:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- state: absent
- check_mode: yes
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Delete account
- azure_rm_automationaccount:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- state: absent
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Delete account (idempotent)
- azure_rm_automationaccount:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- state: absent
- register: output
-
-- assert:
- that:
- - not output.changed
diff --git a/test/integration/targets/azure_rm_autoscale/aliases b/test/integration/targets/azure_rm_autoscale/aliases
deleted file mode 100644
index 19533b3424..0000000000
--- a/test/integration/targets/azure_rm_autoscale/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group5
-destructive
-azure_rm_autoscale
diff --git a/test/integration/targets/azure_rm_autoscale/meta/main.yml b/test/integration/targets/azure_rm_autoscale/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_autoscale/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_autoscale/tasks/main.yml b/test/integration/targets/azure_rm_autoscale/tasks/main.yml
deleted file mode 100644
index 9d3b54a3ee..0000000000
--- a/test/integration/targets/azure_rm_autoscale/tasks/main.yml
+++ /dev/null
@@ -1,219 +0,0 @@
-- name: Prepare random number
- set_fact:
- rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- name: "scale{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-- name: Create virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: testVnet
- address_prefixes: "10.0.0.0/16"
-
-- name: Add subnet
- azure_rm_subnet:
- resource_group: "{{ resource_group }}"
- name: testSubnet
- address_prefix: "10.0.1.0/24"
- virtual_network: testVnet
-
-- name: Create VMSS
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ resource_group }}"
- name: testVMSS{{ rpfx }}
- vm_size: Standard_DS1_v2
- admin_username: testuser
- ssh_password_enabled: true
- admin_password: "Password1234!"
- capacity: 2
- virtual_network_name: testVnet
- subnet_name: testSubnet
- upgrade_policy: Manual
- tier: Standard
- managed_disk_type: Standard_LRS
- os_disk_caching: ReadWrite
- image:
- offer: CoreOS
- publisher: CoreOS
- sku: Stable
- version: latest
- data_disks:
- - lun: 0
- disk_size_gb: 64
- caching: ReadWrite
- managed_disk_type: Standard_LRS
- register: vmss
-
-- name: create auto scaling (check mode)
- azure_rm_autoscale:
- resource_group: "{{ resource_group }}"
- name: "{{ name }}"
- target: "{{ vmss.ansible_facts.azure_vmss.id }}"
- enabled: true
- profiles:
- - count: '1'
- recurrence_days:
- - Monday
- name: Auto created scale condition
- recurrence_timezone: China Standard Time
- recurrence_mins:
- - '0'
- min_count: '1'
- max_count: '1'
- recurrence_frequency: Week
- recurrence_hours:
- - '18'
- check_mode: yes
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: create auto scaling
- azure_rm_autoscale:
- resource_group: "{{ resource_group }}"
- name: "{{ name }}"
- target:
- name: "testVMSS{{ rpfx }}"
- types: "virtualMachineScaleSets"
- namespace: "Microsoft.Compute"
- enabled: true
- profiles:
- - count: '1'
- recurrence_days:
- - Monday
- name: Auto created scale condition
- recurrence_timezone: China Standard Time
- recurrence_mins:
- - '0'
- min_count: '1'
- max_count: '1'
- recurrence_frequency: Week
- recurrence_hours:
- - '18'
- register: output
-
-- assert:
- that:
- - output.changed
- - output.id
-
-- name: create auto scaling (idempotent)
- azure_rm_autoscale:
- resource_group: "{{ resource_group }}"
- name: "{{ name }}"
- target: "{{ vmss.ansible_facts.azure_vmss.id }}"
- enabled: true
- profiles:
- - count: '1'
- recurrence_days:
- - Monday
- name: Auto created scale condition
- recurrence_timezone: China Standard Time
- recurrence_mins:
- - '0'
- min_count: '1'
- max_count: '1'
- recurrence_frequency: Week
- recurrence_hours:
- - '18'
- register: output
-
-- assert:
- that:
- - not output.changed
- - output.id
-
-- name: update auto scaling
- azure_rm_autoscale:
- resource_group: "{{ resource_group }}"
- name: "{{ name }}"
- target: "{{ vmss.ansible_facts.azure_vmss.id }}"
- enabled: true
- profiles:
- - count: '1'
- recurrence_days:
- - Monday
- name: Auto created scale condition 0
- rules:
- - time_aggregation: Average
- time_window: 10
- direction: Increase
- metric_name: Percentage CPU
- metric_resource_uri: "{{ vmss.ansible_facts.azure_vmss.id }}"
- value: '1'
- threshold: 70
- cooldown: 5
- time_grain: 1
- statistic: Average
- operator: GreaterThan
- type: ChangeCount
- max_count: '1'
- recurrence_mins:
- - '0'
- min_count: '1'
- recurrence_timezone: China Standard Time
- recurrence_frequency: Week
- recurrence_hours:
- - '6'
- register: output
-
-- assert:
- that:
- - output.changed
- - output.profiles[0].rules[0].metric_resource_uri == vmss.ansible_facts.azure_vmss.id
-
-- name: delete auto scaling (check mode)
- azure_rm_autoscale:
- resource_group: "{{ resource_group }}"
- name: "{{ name }}"
- state: absent
- check_mode: yes
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: delete auto scaling
- azure_rm_autoscale:
- resource_group: "{{ resource_group }}"
- name: "{{ name }}"
- state: absent
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: delete auto scaling (idempotent)
- azure_rm_autoscale:
- resource_group: "{{ resource_group }}"
- name: "{{ name }}"
- state: absent
- register: output
-
-- assert:
- that:
- - not output.changed
-
-- name: Clean VMSS
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ resource_group }}"
- vm_size: Standard_DS1_v2
- name: testVMSS{{ rpfx }}
- state: absent
-
-- name: Clean subnet
- azure_rm_subnet:
- resource_group: "{{ resource_group }}"
- name: testSubnet
- virtual_network: testVnet
- state: absent
-
-- name: Clean virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: testVnet
- state: absent
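
The autoscale target is supplied two ways in these tasks: once as the full scale-set resource id (vmss.ansible_facts.azure_vmss.id) and once as a dict of name/types/namespace, both pointing at the same Microsoft.Compute/virtualMachineScaleSets resource. A sketch of how the dict form expands into the id form (subscription and resource group values below are placeholders):

    def vmss_resource_id(subscription_id, resource_group, name,
                         namespace='Microsoft.Compute',
                         types='virtualMachineScaleSets'):
        # Standard ARM resource id layout; equivalent to the target dict used
        # in the second "create auto scaling" task.
        return ('/subscriptions/{0}/resourceGroups/{1}/providers/{2}/{3}/{4}'
                .format(subscription_id, resource_group, namespace, types, name))

    # vmss_resource_id('00000000-0000-0000-0000-000000000000',
    #                  'myResourceGroup', 'testVMSS1234')
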
diff --git a/test/integration/targets/azure_rm_availabilityset/aliases b/test/integration/targets/azure_rm_availabilityset/aliases
deleted file mode 100644
index 17456633d2..0000000000
--- a/test/integration/targets/azure_rm_availabilityset/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group2
-destructive
-azure_rm_availabilityset_info
diff --git a/test/integration/targets/azure_rm_availabilityset/meta/main.yml b/test/integration/targets/azure_rm_availabilityset/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_availabilityset/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_availabilityset/tasks/main.yml b/test/integration/targets/azure_rm_availabilityset/tasks/main.yml
deleted file mode 100644
index 8127d3c134..0000000000
--- a/test/integration/targets/azure_rm_availabilityset/tasks/main.yml
+++ /dev/null
@@ -1,136 +0,0 @@
-- name: Create an availability set with default options
- azure_rm_availabilityset:
- name: myavailabilityset1
- resource_group: "{{ resource_group }}"
- tags:
- tag1: testtag
- register: results
-
-- assert:
- that: results.changed
-
-- name: Create an availability set with advanced options
- azure_rm_availabilityset:
- name: myavailabilityset2
- resource_group: "{{ resource_group }}"
- platform_update_domain_count: 5
- platform_fault_domain_count: 2
- sku: Aligned
- register: results
-
-- assert:
- that: results.changed
-
-- name: Modify availability set immutable options - no changes, fail for immutable options
- azure_rm_availabilityset:
- name: myavailabilityset2
- resource_group: "{{ resource_group }}"
- platform_update_domain_count: 2
- platform_fault_domain_count: 2
- sku: Aligned
- register: results
- ignore_errors: yes
-
-- assert:
- that:
- - not results.changed
- - results.msg == 'You tried to change platform_update_domain_count but is was unsuccessful. An Availability Set is immutable, except tags'
-
-- name: Modify availability set immutable options and set tags - change tags and fail for immutable options
- azure_rm_availabilityset:
- name: myavailabilityset2
- resource_group: "{{ resource_group }}"
- platform_update_domain_count: 2
- platform_fault_domain_count: 2
- sku: Aligned
- tags:
- test1: modified
- register: results
- ignore_errors: yes
-
-- assert:
- that:
- - not results.changed
- - results.msg == 'You tried to change platform_update_domain_count but is was unsuccessful. An Availability Set is immutable, except tags'
-
-- name: Modify availability set options to update tags
- azure_rm_availabilityset:
- name: myavailabilityset2
- resource_group: "{{ resource_group }}"
- platform_update_domain_count: 5
- platform_fault_domain_count: 2
- sku: Aligned
- tags:
- test2: modified
- register: results
-
-- assert:
- that:
- - results.state.tags.test2 == 'modified'
-
-- name: Create availability set with incorrect fault domain parameter
- azure_rm_availabilityset:
- name: myavailabilityset3
- resource_group: "{{ resource_group }}"
- platform_update_domain_count: 5
- platform_fault_domain_count: 4
- sku: Aligned
- register: results
- ignore_errors: yes
-
-- assert:
- that:
- - "'The specified fault domain count 4 must fall in the range 1 to' in results['msg']"
-
-- name: Test check_mode
- azure_rm_availabilityset:
- name: myavailabilityset2
- resource_group: "{{ resource_group }}"
- platform_update_domain_count: 5
- platform_fault_domain_count: 2
- sku: Aligned
- tags:
- checktest1: modified1
- checktest2: modified2
- check_mode: yes
- register: results
-
-- assert:
- that:
- - not results.changed
- - results.state.tags.checktest1 == 'modified1'
-
-#
-# azure_rm_availabilityset_facts tests
-#
-- name: Get facts for created availability set
- azure_rm_availabilityset_info:
- name: myavailabilityset2
- resource_group: "{{ resource_group }}"
- register: results
-
-- assert:
- that:
- - not results.changed
- - not results.failed
- - results.ansible_info.azure_availabilitysets[0].properties.platformFaultDomainCount == 2
- - results.ansible_info.azure_availabilitysets[0].properties.platformUpdateDomainCount == 5
- - results.ansible_info.azure_availabilitysets[0].sku == 'Aligned'
-
-
-- name: Delete an availability set
- azure_rm_availabilityset:
- name: myavailabilityset1
- resource_group: "{{ resource_group }}"
- state: absent
-
-- name: Delete an availability set
- azure_rm_availabilityset:
- name: myavailabilityset2
- resource_group: "{{ resource_group }}"
- state: absent
-
-- name: Delete an availability set
- azure_rm_availabilityset:
- name: myavailabilityset3
- resource_group: "{{ resource_group }}"
- state: absent
diff --git a/test/integration/targets/azure_rm_azurefirewall/aliases b/test/integration/targets/azure_rm_azurefirewall/aliases
deleted file mode 100644
index 4b130f3078..0000000000
--- a/test/integration/targets/azure_rm_azurefirewall/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group3
-destructive
-disabled # See: https://github.com/ansible/ansible/issues/62307
diff --git a/test/integration/targets/azure_rm_azurefirewall/meta/main.yml b/test/integration/targets/azure_rm_azurefirewall/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_azurefirewall/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_azurefirewall/tasks/main.yml b/test/integration/targets/azure_rm_azurefirewall/tasks/main.yml
deleted file mode 100644
index face59c2b6..0000000000
--- a/test/integration/targets/azure_rm_azurefirewall/tasks/main.yml
+++ /dev/null
@@ -1,277 +0,0 @@
-- name: Fix resource prefix
- set_fact:
- virtual_network_name: myVirtualNetwork
- subnet_name: AzureFirewallSubnet
- public_ipaddress_name: myPublicIpAddress
- azure_firewall_name: myFirewall
-
-- name: Create virtual network
- azure_rm_virtualnetwork:
- name: "{{ virtual_network_name }}"
- address_prefixes_cidr:
- - 10.1.0.0/16
- - 172.100.0.0/16
- dns_servers:
- - 127.0.0.1
- - 127.0.0.3
- tags:
- testing: testing
- delete: on-exit
- resource_group: "{{ resource_group }}"
-
-- name: Create subnet
- azure_rm_subnet:
- name: "{{ subnet_name }}"
- virtual_network_name: "{{ virtual_network_name }}"
- resource_group: "{{ resource_group }}"
- address_prefix_cidr: "10.1.0.0/24"
-
-- name: Create public IP address
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- allocation_method: Static
- name: "{{ public_ipaddress_name }}"
- sku: Standard
- register: pip_output
-
-- debug:
- var: pip_output
-
-- name: Create Azure Firewall
- azure_rm_azurefirewall:
- resource_group: '{{resource_group}}'
- name: '{{azure_firewall_name}}'
- #tags:
- # key1: value1
- application_rule_collections:
- - priority: 110
- action: deny
- rules:
- - name: rule1
- description: Deny inbound rule
- source_addresses:
- - 216.58.216.164
- - 10.0.0.0/25
- protocols:
- - type: https
- port: '443'
- target_fqdns:
- - www.test.com
- name: apprulecoll
- nat_rule_collections:
- - priority: 112
- action: dnat
- rules:
- - name: DNAT-HTTPS-traffic
- description: D-NAT all outbound web traffic for inspection
- source_addresses:
- - '*'
- destination_addresses:
- - "{{ pip_output.state.ip_address }}"
- destination_ports:
- - '443'
- protocols:
- - tcp
- translated_address: 1.2.3.5
- translated_port: '8443'
- name: natrulecoll
- network_rule_collections:
- - priority: 112
- action: deny
- rules:
- - name: L4-traffic
- description: Block traffic based on source IPs and ports
- protocols:
- - tcp
- source_addresses:
- - 192.168.1.1-192.168.1.12
- - 10.1.4.12-10.1.4.255
- destination_addresses:
- - '*'
- destination_ports:
- - 443-444
- - '8443'
- name: netrulecoll
- ip_configurations:
- - subnet:
- virtual_network_name: "{{ virtual_network_name }}"
- name: "{{ subnet_name }}"
- public_ip_address:
- name: "{{ public_ipaddress_name }}"
- name: azureFirewallIpConfiguration
- register: output
-
-- debug:
- var: output
-
-- name: Assert that output has changed
- assert:
- that:
- - output.changed
-
-- name: Create Azure Firewall -- idempotent
- azure_rm_azurefirewall:
- resource_group: '{{resource_group}}'
- name: '{{azure_firewall_name}}'
- application_rule_collections:
- - priority: 110
- action: deny
- rules:
- - name: rule1
- description: Deny inbound rule
- source_addresses:
- - 216.58.216.164
- - 10.0.0.0/25
- protocols:
- - type: https
- port: '443'
- target_fqdns:
- - www.test.com
- name: apprulecoll
- nat_rule_collections:
- - priority: 112
- action: dnat
- rules:
- - name: DNAT-HTTPS-traffic
- description: D-NAT all outbound web traffic for inspection
- source_addresses:
- - '*'
- destination_addresses:
- - "{{ pip_output.state.ip_address }}"
- destination_ports:
- - '443'
- protocols:
- - tcp
- translated_address: 1.2.3.5
- translated_port: '8443'
- name: natrulecoll
- network_rule_collections:
- - priority: 112
- action: deny
- rules:
- - name: L4-traffic
- description: Block traffic based on source IPs and ports
- protocols:
- - tcp
- source_addresses:
- - 192.168.1.1-192.168.1.12
- - 10.1.4.12-10.1.4.255
- destination_addresses:
- - '*'
- destination_ports:
- - 443-444
- - '8443'
- name: netrulecoll
- ip_configurations:
- - subnet:
- virtual_network_name: "{{ virtual_network_name }}"
- name: "{{ subnet_name }}"
- public_ip_address:
- name: "{{ public_ipaddress_name }}"
- name: azureFirewallIpConfiguration
- register: output
-
-- debug:
- var: output
-
-- name: Assert that output has not changed
- assert:
- that:
- - not output.changed
-
-- name: Create Azure Firewall -- change something
- azure_rm_azurefirewall:
- resource_group: '{{resource_group}}'
- name: '{{azure_firewall_name}}'
- application_rule_collections:
- - priority: 110
- action: deny
- rules:
- - name: rule1
- description: Deny inbound rule
- source_addresses:
- - 216.58.216.165
- - 10.0.0.0/25
- protocols:
- - type: https
- port: '443'
- target_fqdns:
- - www.test.com
- name: apprulecoll
- nat_rule_collections:
- - priority: 112
- action: dnat
- rules:
- - name: DNAT-HTTPS-traffic
- description: D-NAT all outbound web traffic for inspection
- source_addresses:
- - '*'
- destination_addresses:
- - "{{ pip_output.state.ip_address }}"
- destination_ports:
- - '443'
- protocols:
- - tcp
- translated_address: 1.2.3.6
- translated_port: '8443'
- name: natrulecoll
- network_rule_collections:
- - priority: 112
- action: deny
- rules:
- - name: L4-traffic
- description: Block traffic based on source IPs and ports
- protocols:
- - tcp
- source_addresses:
- - 192.168.1.1-192.168.1.12
- - 10.1.4.12-10.1.4.255
- destination_addresses:
- - '*'
- destination_ports:
- - 443-445
- - '8443'
- name: netrulecoll
- ip_configurations:
- - subnet:
- virtual_network_name: "{{ virtual_network_name }}"
- name: "{{ subnet_name }}"
- public_ip_address:
- name: "{{ public_ipaddress_name }}"
- name: azureFirewallIpConfiguration
- check_mode: yes
- register: output
-
-- name: Assert that output has changed
- assert:
- that:
- - output.changed
-
-- name: Get info of the Azure Firewall
- azure_rm_azurefirewall_info:
- resource_group: '{{resource_group}}'
- name: '{{azure_firewall_name}}'
- register: output
-
-- assert:
- that:
- - not output.changed
- - output.firewalls['id'] != None
- - output.firewalls['name'] != None
- - output.firewalls['location'] != None
- - output.firewalls['etag'] != None
- - output.firewalls['nat_rule_collections'] != None
- - output.firewalls['network_rule_collections'] != None
- - output.firewalls['ip_configurations'] != None
- - output.firewalls['provisioning_state'] != None
-
-- name: Delete Azure Firewall
- azure_rm_azurefirewall:
- resource_group: '{{resource_group}}'
- name: '{{azure_firewall_name}}'
- state: absent
- register: output
-
-- assert:
- that:
- - output.changed
diff --git a/test/integration/targets/azure_rm_batchaccount/aliases b/test/integration/targets/azure_rm_batchaccount/aliases
deleted file mode 100644
index 49acfee76c..0000000000
--- a/test/integration/targets/azure_rm_batchaccount/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/azure
-destructive
-shippable/azure/group2
diff --git a/test/integration/targets/azure_rm_batchaccount/meta/main.yml b/test/integration/targets/azure_rm_batchaccount/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_batchaccount/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_batchaccount/tasks/main.yml b/test/integration/targets/azure_rm_batchaccount/tasks/main.yml
deleted file mode 100644
index e62cb67cfc..0000000000
--- a/test/integration/targets/azure_rm_batchaccount/tasks/main.yml
+++ /dev/null
@@ -1,76 +0,0 @@
----
-# ----------------------------------------------------------------------------
-#
-# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
-#
-# ----------------------------------------------------------------------------
-#
-# This file is automatically generated by Magic Modules and manual
-# changes will be clobbered when the file is regenerated.
-#
-#
-# ----------------------------------------------------------------------------
-- name: Prepare random number
- set_fact:
- storage_account_name: "st{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- batch_account_name: "ba{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-- name: Create Storage Account
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account_name }}"
- location: eastus
- account_type: Standard_LRS
-
-- name: Create Batch Account
- azure_rm_batchaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ batch_account_name }}"
- location: eastus
- auto_storage_account:
- name: "{{ storage_account_name }}"
- pool_allocation_mode: batch_service
- register: output
-
-- name: Assert the resource was created
- assert:
- that:
- - output.changed
-
-- name: Create Batch Account -- idempotent
- azure_rm_batchaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ batch_account_name }}"
- location: eastus
- auto_storage_account:
- name: "{{ storage_account_name }}"
- pool_allocation_mode: batch_service
- register: output
-
-- name: Assert the resource was created
- assert:
- that:
- - not output.changed
-
-- name: Delete Batch Account
- azure_rm_batchaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ batch_account_name }}"
- location: eastus
- auto_storage_account:
- name: "{{ storage_account_name }}"
- pool_allocation_mode: batch_service
- state: absent
- register: output
-
-- name: Assert that state has changed
- assert:
- that:
- - output.changed
-
-- name: Clean up storage account
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account_name }}"
- state: absent
diff --git a/test/integration/targets/azure_rm_cdnprofile/aliases b/test/integration/targets/azure_rm_cdnprofile/aliases
deleted file mode 100644
index ea347fa407..0000000000
--- a/test/integration/targets/azure_rm_cdnprofile/aliases
+++ /dev/null
@@ -1,5 +0,0 @@
-cloud/azure
-shippable/azure/group2
-destructive
-azure_rm_cdnprofile_info
-azure_rm_cdnendpoint
diff --git a/test/integration/targets/azure_rm_cdnprofile/meta/main.yml b/test/integration/targets/azure_rm_cdnprofile/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_cdnprofile/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_cdnprofile/tasks/main.yml b/test/integration/targets/azure_rm_cdnprofile/tasks/main.yml
deleted file mode 100644
index 657881a507..0000000000
--- a/test/integration/targets/azure_rm_cdnprofile/tasks/main.yml
+++ /dev/null
@@ -1,276 +0,0 @@
-- name: Prepare random number
- set_fact:
- cdnprofilename: "cdnprofile{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- endpointname: "endpoint{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-
-- name: Create a CDN profile (check mode)
- azure_rm_cdnprofile:
- resource_group: "{{ resource_group }}"
- name: "{{ cdnprofilename }}"
- sku: standard_akamai
- tags:
- testing: testing
- delete: on-exit
- foo: bar
- check_mode: yes
-
-- name: Check there is no CDN profile created
- azure_rm_cdnprofile_info:
- resource_group: "{{ resource_group }}"
- name: "{{ cdnprofilename }}"
- register: fact
-
-- name: Assert there is no CDN profile created
- assert: { that: "{{ fact.cdnprofiles | length }} == 0" }
-
-- name: Create a CDN profile
- azure_rm_cdnprofile:
- resource_group: "{{ resource_group }}"
- name: "{{ cdnprofilename }}"
- sku: standard_akamai
- tags:
- testing: testing
- delete: on-exit
- foo: bar
- register: output
-
-- name: Assert the CDN profile was created successfully
- assert:
- that:
- - output.changed
- - output.id != ''
-
-- name: Gather CDN profile facts
- azure_rm_cdnprofile_info:
- resource_group: "{{ resource_group }}"
- name: "{{ cdnprofilename }}"
- register: fact
-
-- name: Assert fact returns the created one
- assert:
- that:
- - "fact.cdnprofiles | length == 1"
- - fact.cdnprofiles[0].sku == 'Standard_Akamai'
- - fact.cdnprofiles[0].tags.foo == 'bar'
-
-- name: Create a CDN profile (idempotent)
- azure_rm_cdnprofile:
- resource_group: "{{ resource_group }}"
- name: "{{ cdnprofilename }}"
- sku: standard_akamai
- tags:
- testing: testing
- delete: on-exit
- foo: bar
- register: output
-
-- name: Assert idempotent
- assert:
- that:
- - not output.changed
-
-- name: Update the CDN profile
- azure_rm_cdnprofile:
- resource_group: "{{ resource_group }}"
- name: "{{ cdnprofilename }}"
- sku: standard_akamai
- tags:
- testing: testing
- delete: on-exit
- foo: bar
- baz: qux
- register: output
-
-- name: Assert the CDN profile is updated
- assert:
- that:
- - output.changed
-
-- name: Delete the CDN profile (check mode)
- azure_rm_cdnprofile:
- resource_group: "{{ resource_group }}"
- name: "{{ cdnprofilename }}"
- state: absent
- check_mode: yes
-
-- name: Gather CDN profile facts
- azure_rm_cdnprofile_info:
- resource_group: "{{ resource_group }}"
- name: "{{ cdnprofilename }}"
- register: fact
-
-- name: Assert the CDN is still there
- assert:
- that:
- - "fact.cdnprofiles | length == 1"
- - fact.cdnprofiles[0].sku == 'Standard_Akamai'
- - fact.cdnprofiles[0].tags.foo == 'bar'
- - fact.cdnprofiles[0].tags.baz == 'qux'
-
-- name: Create an Azure CDN endpoint (check mode)
- azure_rm_cdnendpoint:
- resource_group: "{{ resource_group }}"
- name: "{{ endpointname }}"
- profile_name: "{{ cdnprofilename }}"
- origins:
- - name: "org{{ endpointname }}"
- host_name: "www.google.com"
- tags:
- testing: testing
- delete: on-exit
- foo: bar
- check_mode: yes
-
-- name: Create an Azure CDN endpoint
- azure_rm_cdnendpoint:
- resource_group: "{{ resource_group }}"
- name: "{{ endpointname }}"
- profile_name: "{{ cdnprofilename }}"
- origins:
- - name: "org{{ endpointname }}"
- host_name: "www.google.com"
- tags:
- testing: testing
- delete: on-exit
- foo: bar
- register: output
-
-- name: Assert the Azure CDN endpoint was created successfully
- assert:
- that:
- - output.changed
- - output.id
-
-- name: Get facts of an Azure CDN endpoint
- azure_rm_cdnendpoint_info:
- resource_group: "{{ resource_group }}"
- name: "{{ endpointname }}"
- profile_name: "{{ cdnprofilename }}"
- register: facts
-
-- name: Assert facts output
- assert:
- that:
- - facts['cdnendpoints'] | length == 1
- - facts['cdnendpoints'][0]['id']
- - facts['cdnendpoints'][0]['name']
- - facts['cdnendpoints'][0]['profile_name']
- - facts['cdnendpoints'][0]['origin']
- - facts['cdnendpoints'][0]['location']
- - facts['cdnendpoints'][0]['provisioning_state']
- - facts['cdnendpoints'][0]['resource_state']
-
-- name: Create an Azure CDN endpoint (idempotent)
- azure_rm_cdnendpoint:
- resource_group: "{{ resource_group }}"
- name: "{{ endpointname }}"
- profile_name: "{{ cdnprofilename }}"
- origins:
- - name: "org{{ endpointname }}"
- host_name: "www.google.com"
- tags:
- testing: testing
- delete: on-exit
- foo: bar
- register: output
-
-- name: Assert idempotent
- assert:
- that:
- - not output.changed
-
-- name: Stop an Azure CDN endpoint
- azure_rm_cdnendpoint:
- resource_group: "{{ resource_group }}"
- name: "{{ endpointname }}"
- profile_name: "{{ cdnprofilename }}"
- started: False
- register: output
-
-- name: Assert stopped
- assert:
- that:
- - output.changed
-
-- name: Stop an Azure CDN endpoint (idempotent)
- azure_rm_cdnendpoint:
- resource_group: "{{ resource_group }}"
- name: "{{ endpointname }}"
- profile_name: "{{ cdnprofilename }}"
- started: False
- register: output
-
-- name: Assert still stopped and not changed
- assert:
- that:
- - not output.changed
-
-- name: Start an Azure CDN endpoint
- azure_rm_cdnendpoint:
- resource_group: "{{ resource_group }}"
- name: "{{ endpointname }}"
- profile_name: "{{ cdnprofilename }}"
- started: True
- register: output
-
-- name: Assert started
- assert:
- that:
- - output.changed
-
-- name: Update the Azure CDN endpoint
- azure_rm_cdnendpoint:
- resource_group: "{{ resource_group }}"
- name: "{{ endpointname }}"
- profile_name: "{{ cdnprofilename }}"
- origin_path: /test/
- tags:
- testing: testing
- delete: on-exit
- foo: baz
- register: output
-
-- name: Assert the Azure CDN endpoint is updated
- assert:
- that:
- - output.changed
-
-- name: Delete an Azure CDN endpoint (check mode)
- azure_rm_cdnendpoint:
- resource_group: "{{ resource_group }}"
- name: "{{ endpointname }}"
- profile_name: "{{ cdnprofilename }}"
- state: absent
- check_mode: yes
-
-- name: Delete an Azure CDN endpoint
- azure_rm_cdnendpoint:
- resource_group: "{{ resource_group }}"
- name: "{{ endpointname }}"
- profile_name: "{{ cdnprofilename }}"
- state: absent
-
-- name: Delete the CDN profile
- azure_rm_cdnprofile:
- resource_group: "{{ resource_group }}"
- name: "{{ cdnprofilename }}"
- state: absent
- register: output
-
-- name: Assert the CDN profile was deleted successfully
- assert:
- that:
- - output.changed
-
-- name: Get CDN profile fact
- azure_rm_cdnprofile_info:
- resource_group: "{{ resource_group }}"
- name: "{{ cdnprofilename }}"
- register: fact
-
-- name: Assert fact returns empty
- assert:
- that:
- - "fact.cdnprofiles | length == 0"
diff --git a/test/integration/targets/azure_rm_containerinstance/aliases b/test/integration/targets/azure_rm_containerinstance/aliases
deleted file mode 100644
index 93066dcc2d..0000000000
--- a/test/integration/targets/azure_rm_containerinstance/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-destructive
-shippable/azure/group2
-azure_rm_containerinstance_info
diff --git a/test/integration/targets/azure_rm_containerinstance/meta/main.yml b/test/integration/targets/azure_rm_containerinstance/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_containerinstance/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_containerinstance/tasks/main.yml b/test/integration/targets/azure_rm_containerinstance/tasks/main.yml
deleted file mode 100644
index a76e387a30..0000000000
--- a/test/integration/targets/azure_rm_containerinstance/tasks/main.yml
+++ /dev/null
@@ -1,214 +0,0 @@
-- name: Create sample container instance
- azure_rm_containerinstance:
- resource_group: "{{ resource_group }}"
- name: "aci{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- os_type: linux
- ip_address: public
- location: eastus
- ports:
- - 80
- containers:
- - name: mycontainer1
- image: httpd
- memory: 1.5
- ports:
- - 80
- - 81
- register: output
-
-- debug:
- var: output
-
-- name: Assert the container instance was created successfully
- assert:
- that:
- - output.changed
- - output.provisioning_state == 'Succeeded'
-
-- name: Create sample container instance -- same parameters
- azure_rm_containerinstance:
- resource_group: "{{ resource_group }}"
- name: "aci{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- os_type: linux
- ip_address: public
- location: eastus
- ports:
- - 80
- containers:
- - name: mycontainer1
- image: httpd
- memory: 1.5
- ports:
- - 80
- - 81
- register: output
-
-- name: Assert no change was reported
- assert:
- that:
- - output.changed == False
-
-- name: Create sample container instance -- force update
- azure_rm_containerinstance:
- resource_group: "{{ resource_group }}"
- name: "aci{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- os_type: linux
- ip_address: public
- location: eastus
- ports:
- - 80
- containers:
- - name: mycontainer1
- image: httpd
- memory: 1.5
- ports:
- - 80
- - 81
- force_update: yes
- register: output
-
-- name: Assert the container instance was updated successfully
- assert:
- that:
- - output.changed
- - output.provisioning_state == 'Succeeded'
-
-- name: Create second container instance for testing purposes
- azure_rm_containerinstance:
- resource_group: "{{ resource_group }}"
- name: "aci{{ resource_group | hash('md5') | truncate(7, True, '') }}sec"
- os_type: linux
- ip_address: public
- dns_name_label: mydnslabel{{ resource_group | hash('md5') | truncate(7, True, '') }}
- location: eastus
- restart_policy: on_failure
- ports:
- - 80
- containers:
- - name: mycontainer1
- image: httpd
- memory: 1.5
- ports:
- - 80
- - 81
- commands:
- - echo abc
- - echo cdf
- environment_variables:
- - name: myvar
- value: myvarvalue
- register: output
-
-- name: Gather facts for single Container Instance
- azure_rm_containerinstance_info:
- resource_group: "{{ resource_group }}"
- name: "aci{{ resource_group | hash('md5') | truncate(7, True, '') }}sec"
- register: output
-
-- debug:
- var: output
-
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.containerinstances[0]['resource_group'] != None
- - output.containerinstances[0]['name'] != None
- - output.containerinstances[0]['os_type'] != None
- - output.containerinstances[0]['location'] != None
- - output.containerinstances[0]['ip_address'] != None
- - output.containerinstances[0]['ports'] != None
- - output.containerinstances[0]['containers'] != None
- - output.containerinstances[0]['containers'][0]['commands'] | length == 2
- - output.containerinstances[0]['containers'][0]['environment_variables'] | length == 1
- - output.containerinstances[0]['restart_policy'] == 'on_failure'
-
-- name: Gather facts for all Container Instances in the resource group
- azure_rm_containerinstance_info:
- resource_group: "{{ resource_group }}"
- register: output
-
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.containerinstances[0]['resource_group'] != None
- - output.containerinstances[0]['name'] != None
- - output.containerinstances[0]['os_type'] != None
- - output.containerinstances[0]['location'] != None
- - output.containerinstances[0]['ip_address'] != None
- - output.containerinstances[0]['ports'] != None
- - output.containerinstances[0]['containers'] != None
- - output.containerinstances[1]['resource_group'] != None
- - output.containerinstances[1]['name'] != None
- - output.containerinstances[1]['os_type'] != None
- - output.containerinstances[1]['location'] != None
- - output.containerinstances[1]['ip_address'] != None
- - output.containerinstances[1]['ports'] != None
- - output.containerinstances[1]['containers'] != None
-
-- name: Remove container instance
- azure_rm_containerinstance:
- resource_group: "{{ resource_group }}"
- name: "aci{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- os_type: linux
- ip_address: public
- location: eastus
- ports:
- - 80
- containers:
- - name: mycontainer1
- image: httpd
- memory: 1.5
- ports:
- - 80
- - 81
- state: absent
- register: output
-
-- name: Assert the container instance is deleted
- assert:
- that:
- - output.changed
-
-- name: Remove second container instance
- azure_rm_containerinstance:
- resource_group: "{{ resource_group }}"
- name: "aci{{ resource_group | hash('md5') | truncate(7, True, '') }}sec"
- os_type: linux
- ip_address: public
- location: eastus
- ports:
- - 80
- containers:
- - name: mycontainer1
- image: httpd
- memory: 1.5
- ports:
- - 80
- - 81
- state: absent
-
-- name: Remove container instance again
- azure_rm_containerinstance:
- resource_group: "{{ resource_group }}"
- name: "aci{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- os_type: linux
- ip_address: public
- location: eastus
- ports:
- - 80
- containers:
- - name: mycontainer1
- image: httpd
- memory: 1.5
- ports:
- - 80
- - 81
- state: absent
- register: output
-
-- name: Assert that no change was reported
- assert:
- that:
- - output.changed == False
diff --git a/test/integration/targets/azure_rm_containerregistry/aliases b/test/integration/targets/azure_rm_containerregistry/aliases
deleted file mode 100644
index 2615d3fe02..0000000000
--- a/test/integration/targets/azure_rm_containerregistry/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group2
-destructive
-azure_rm_containerregistry_info
diff --git a/test/integration/targets/azure_rm_containerregistry/meta/main.yml b/test/integration/targets/azure_rm_containerregistry/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_containerregistry/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_containerregistry/tasks/main.yml b/test/integration/targets/azure_rm_containerregistry/tasks/main.yml
deleted file mode 100644
index 7c83c5c5da..0000000000
--- a/test/integration/targets/azure_rm_containerregistry/tasks/main.yml
+++ /dev/null
@@ -1,116 +0,0 @@
- - name: Create a container registry
- azure_rm_containerregistry:
- name: "acr{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- resource_group: "{{ resource_group }}"
- location: eastus2
- admin_user_enabled: true
- sku: Premium
- tags:
- Release: beta1
- Environment: Production
- register: output
-
- - name: Assert the container registry instance was created successfully
- assert:
- that:
- - output.changed
- - output.admin_user_enabled
- - output.location == 'eastus2'
- - output.sku == 'Premium'
- - output.tags['Environment'] == 'Production'
- - output.tags['Release'] == 'beta1'
- - output.provisioning_state == 'Succeeded'
- - output.credentials['password'] is defined
- - output.credentials['password2'] is defined
-
- - name: Update the ACR instance sku, tags and admin_user_enabled
- azure_rm_containerregistry:
- name: "acr{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- resource_group: "{{ resource_group }}"
- location: eastus2
- admin_user_enabled: false
- sku: Standard
- tags:
- NewTag: newtag
- Release: beta1
- Environment: Production
- register: output
-
- - name: Create second container registry (to test facts)
- azure_rm_containerregistry:
- name: "acr{{ resource_group | hash('md5') | truncate(7, True, '') }}sec"
- resource_group: "{{ resource_group }}"
- location: eastus2
- admin_user_enabled: false
- sku: Premium
- tags:
- Release: beta1
- Environment: Production
-
- - name: Assert the ACR instance was updated successfully
- assert:
- that:
- - output.changed == True
- - output.admin_user_enabled == False
- - output.sku == 'Standard'
- - output.tags['NewTag'] == 'newtag'
- - output.credentials | length == 0
- - output.credentials['password'] is not defined
- - output.credentials['password2'] is not defined
-
- - name: Gather facts for single Container Registry
- azure_rm_containerregistry_info:
- resource_group: "{{ resource_group }}"
- name: "acr{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- register: output
-
- - name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.registries[0]['name'] != None
- - output.registries[0]['location'] != None
- - output.registries[0]['admin_user_enabled'] != None
- - output.registries[0]['sku'] != None
- - output.registries[0]['provisioning_state'] != None
- - output.registries[0]['login_server'] != None
- - output.registries[0]['id'] != None
- - output.registries[0]['credentials'] != None
-
- - name: Gather facts for all Container Registries in the resource group
- azure_rm_containerregistry_info:
- resource_group: "{{ resource_group }}"
- register: output
-
- - name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.registries[0]['name'] != None
- - output.registries[0]['location'] != None
- - output.registries[0]['admin_user_enabled'] != None
- - output.registries[0]['sku'] != None
- - output.registries[0]['provisioning_state'] != None
- - output.registries[0]['login_server'] != None
- - output.registries[0]['id'] != None
- - output.registries[0]['credentials'] != None
- - output.registries[1]['name'] != None
- - output.registries[1]['location'] != None
- - output.registries[1]['admin_user_enabled'] != None
- - output.registries[1]['sku'] != None
- - output.registries[1]['provisioning_state'] != None
- - output.registries[1]['login_server'] != None
- - output.registries[1]['id'] != None
- - output.registries[1]['credentials'] != None
-
- - name: Delete first container registry
- azure_rm_containerregistry:
- name: "acr{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- resource_group: "{{ resource_group }}"
- state: absent
-
- - name: Delete second container registry
- azure_rm_containerregistry:
- name: "acr{{ resource_group | hash('md5') | truncate(7, True, '') }}sec"
- resource_group: "{{ resource_group }}"
- state: absent
diff --git a/test/integration/targets/azure_rm_cosmosdbaccount/aliases b/test/integration/targets/azure_rm_cosmosdbaccount/aliases
deleted file mode 100644
index 8a081bd43e..0000000000
--- a/test/integration/targets/azure_rm_cosmosdbaccount/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-destructive
-shippable/azure/group1
-azure_rm_cosmosdbaccount_info
diff --git a/test/integration/targets/azure_rm_cosmosdbaccount/meta/main.yml b/test/integration/targets/azure_rm_cosmosdbaccount/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_cosmosdbaccount/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_cosmosdbaccount/tasks/main.yml b/test/integration/targets/azure_rm_cosmosdbaccount/tasks/main.yml
deleted file mode 100644
index f50aa47c55..0000000000
--- a/test/integration/targets/azure_rm_cosmosdbaccount/tasks/main.yml
+++ /dev/null
@@ -1,249 +0,0 @@
-- name: Prepare random number
- set_fact:
- dbname: "cosmos{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- vnname: "vn{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- subnetname: "subnet{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-- name: Create virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: "{{ vnname }}"
- address_prefixes_cidr:
- - 10.1.0.0/16
- - 172.100.0.0/16
- dns_servers:
- - 127.0.0.1
- - 127.0.0.3
-
-- name: Add subnet
- azure_rm_subnet:
- name: "{{ subnetname }}"
- virtual_network_name: "{{ vnname }}"
- resource_group: "{{ resource_group }}"
- address_prefix_cidr: "10.1.0.0/24"
-
-- name: Create instance of Database Account -- check mode
- azure_rm_cosmosdbaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ dbname }}"
- location: eastus
- geo_rep_locations:
- - name: eastus
- failover_priority: 0
- database_account_offer_type: Standard
- check_mode: yes
- register: output
-- name: Assert the resource instance would be created
- assert:
- that:
- - output.changed
-
-- name: Create instance of Database Account
- azure_rm_cosmosdbaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ dbname }}"
- location: eastus
- kind: global_document_db
- geo_rep_locations:
- - name: eastus
- failover_priority: 0
- - name: westus
- failover_priority: 1
- database_account_offer_type: Standard
- is_virtual_network_filter_enabled: yes
- virtual_network_rules:
- - subnet:
- resource_group: "{{ resource_group }}"
- virtual_network_name: "{{ vnname }}"
- subnet_name: "{{ subnetname }}"
- ignore_missing_vnet_service_endpoint: yes
- register: output
-- name: Assert the resource instance was created successfully
- assert:
- that:
- - output.changed
-
-- name: Create instance of Database Account again
- azure_rm_cosmosdbaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ dbname }}"
- location: eastus
- kind: global_document_db
- geo_rep_locations:
- - name: eastus
- failover_priority: 0
- - name: westus
- failover_priority: 1
- database_account_offer_type: Standard
- is_virtual_network_filter_enabled: yes
- virtual_network_rules:
- - subnet:
- resource_group: "{{ resource_group }}"
- virtual_network_name: "{{ vnname }}"
- subnet_name: "{{ subnetname }}"
- ignore_missing_vnet_service_endpoint: yes
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
-
-- name: Create instance of Database Account again -- change something
- azure_rm_cosmosdbaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ dbname }}"
- location: eastus
- kind: global_document_db
- geo_rep_locations:
- - name: eastus
- failover_priority: 0
- - name: westus
- failover_priority: 1
- database_account_offer_type: Standard
- is_virtual_network_filter_enabled: yes
- virtual_network_rules:
- - subnet:
- resource_group: "{{ resource_group }}"
- virtual_network_name: "{{ vnname }}"
- subnet_name: "{{ subnetname }}"
- ignore_missing_vnet_service_endpoint: yes
- enable_automatic_failover: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Get facts of single account
- azure_rm_cosmosdbaccount_info:
- resource_group: "{{ resource_group }}"
- name: "{{ dbname }}"
- register: output
-
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.accounts[0]['id'] != None
- - output.accounts[0]['resource_group'] != None
- - output.accounts[0]['name'] != None
- - output.accounts[0]['location'] != None
- - output.accounts[0]['kind'] != None
- - output.accounts[0]['consistency_policy'] != None
- - output.accounts[0]['failover_policies'] != None
- - output.accounts[0]['read_locations'] != None
- - output.accounts[0]['write_locations'] != None
- - output.accounts[0]['database_account_offer_type'] != None
- - output.accounts[0]['ip_range_filter'] != None
- - output.accounts[0]['is_virtual_network_filter_enabled'] != None
- - output.accounts[0]['enable_automatic_failover'] != None
- - output.accounts[0]['enable_cassandra'] != None
- - output.accounts[0]['enable_table'] != None
- - output.accounts[0]['enable_gremlin'] != None
- - output.accounts[0]['virtual_network_rules'] != None
- - output.accounts[0]['enable_multiple_write_locations'] != None
- - output.accounts[0]['document_endpoint'] != None
- - output.accounts[0]['provisioning_state'] != None
- - output.accounts[0]['tags'] != None
-
-- name: Get facts with keys
- azure_rm_cosmosdbaccount_info:
- resource_group: "{{ resource_group }}"
- name: "{{ dbname }}"
- retrieve_keys: all
- register: output
-
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.accounts[0]['primary_master_key'] != None
- - output.accounts[0]['secondary_master_key'] != None
- - output.accounts[0]['primary_readonly_master_key'] != None
- - output.accounts[0]['secondary_readonly_master_key'] != None
-
-- name: Get facts with readonly keys
- azure_rm_cosmosdbaccount_info:
- resource_group: "{{ resource_group }}"
- name: "{{ dbname }}"
- retrieve_keys: readonly
- retrieve_connection_strings: yes
- register: output
-
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - "'primary_master_key' not in output.accounts[0]"
- - "'secondary_master_key' not in output.accounts[0]"
- - output.accounts[0]['primary_readonly_master_key'] != None
- - output.accounts[0]['secondary_readonly_master_key'] != None
- - output.accounts[0]['connection_strings'] | length > 0
-
-- name: List accounts by resource group
- azure_rm_cosmosdbaccount_info:
- resource_group: "{{ resource_group }}"
- name: "{{ dbname }}"
- register: output
-
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.accounts[0]['id'] != None
- - output.accounts[0]['resource_group'] != None
- - output.accounts[0]['name'] != None
- - output.accounts[0]['location'] != None
- - output.accounts[0]['kind'] != None
- - output.accounts[0]['consistency_policy'] != None
- - output.accounts[0]['failover_policies'] != None
- - output.accounts[0]['read_locations'] != None
- - output.accounts[0]['write_locations'] != None
- - output.accounts[0]['database_account_offer_type'] != None
- - output.accounts[0]['ip_range_filter'] != None
- - output.accounts[0]['is_virtual_network_filter_enabled'] != None
- - output.accounts[0]['enable_automatic_failover'] != None
- - output.accounts[0]['enable_cassandra'] != None
- - output.accounts[0]['enable_table'] != None
- - output.accounts[0]['enable_gremlin'] != None
- - output.accounts[0]['virtual_network_rules'] != None
- - output.accounts[0]['enable_multiple_write_locations'] != None
- - output.accounts[0]['document_endpoint'] != None
- - output.accounts[0]['provisioning_state'] != None
- - output.accounts[0]['tags'] != None
-
-- name: Delete instance of Database Account -- check mode
- azure_rm_cosmosdbaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ dbname }}"
- state: absent
- check_mode: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete instance of Database Account
- azure_rm_cosmosdbaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ dbname }}"
- state: absent
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-# currently disabled as there's a bug in SDK / Service
-#- name: Delete unexisting instance of Database Account
-# azure_rm_cosmosdbaccount:
-# resource_group: "{{ resource_group }}"
-# name: "{{ dbname }}"
-# state: absent
-# register: output
-#- name: Assert the state has changed
-# assert:
-# that:
-# - output.changed == false
diff --git a/test/integration/targets/azure_rm_deployment/aliases b/test/integration/targets/azure_rm_deployment/aliases
deleted file mode 100644
index 095e5ec347..0000000000
--- a/test/integration/targets/azure_rm_deployment/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/azure
-destructive
-shippable/azure/group1
diff --git a/test/integration/targets/azure_rm_deployment/meta/main.yml b/test/integration/targets/azure_rm_deployment/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_deployment/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_deployment/tasks/main.yml b/test/integration/targets/azure_rm_deployment/tasks/main.yml
deleted file mode 100644
index fb3d195151..0000000000
--- a/test/integration/targets/azure_rm_deployment/tasks/main.yml
+++ /dev/null
@@ -1,45 +0,0 @@
-- name: Create random dns label
- set_fact:
- dns_label: "test{{ resource_group | hash('md5') | truncate(16, True, '') + (65535 | random | string) }}"
-
-- name: Create Azure Deploy
- azure_rm_deployment:
- resource_group: "{{ resource_group }}"
- location: "eastus"
- template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/d01a5c06f4f1bc03a049ca17bbbd6e06d62657b3/101-vm-simple-linux/azuredeploy.json'
- deployment_name: "{{ dns_label }}"
- parameters:
- adminUsername:
- value: chouseknecht
- adminPassword:
- value: password123!
- dnsLabelPrefix:
- value: "{{ dns_label }}"
- ubuntuOSVersion:
- value: "16.04.0-LTS"
- register: output
-
-- name: Add new instance to host group
- add_host:
- hostname: "{{ item.vm_name }}"
- ansible_host: "{{ item['ips'][0].public_ip }}"
- ansible_user: chouseknecht
- ansible_ssh_pass: password123!
- groupname: azure_vms
- with_items: "{{ output.deployment.instances }}"
-
-- name: Get Deployment Facts
- azure_rm_deployment_info:
- resource_group: "{{ resource_group }}"
- name: "{{ dns_label }}"
- register: output
-- debug:
- var: output
-
-- name: Assert that values are returned
- assert:
- that:
- - not output.changed
- - output.deployments[0]['provisioning_state'] != None
- - output.deployments[0]['output_resources'] | length > 0
- - output.deployments[0]['outputs'] | length > 0
diff --git a/test/integration/targets/azure_rm_devtestlab/aliases b/test/integration/targets/azure_rm_devtestlab/aliases
deleted file mode 100644
index fc6fecf8d5..0000000000
--- a/test/integration/targets/azure_rm_devtestlab/aliases
+++ /dev/null
@@ -1,17 +0,0 @@
-cloud/azure
-destructive
-shippable/azure/group2
-azure_rm_devtestlab_facts
-azure_rm_devtestlabarmtemplate
-azure_rm_devtestlabartifact
-azure_rm_devtestlabartifactsource_facts
-azure_rm_devtestlabartifactsource
-azure_rm_devtestlabcustomimage
-azure_rm_devtestlabpolicy
-azure_rm_devtestlabschedule
-azure_rm_devtestlabvirtualmachine_facts
-azure_rm_devtestlabvirtualmachine_facts
-azure_rm_devtestlabvirtualnetwork_facts
-azure_rm_devtestlabvirtualnetwork
-disabled
-
diff --git a/test/integration/targets/azure_rm_devtestlab/meta/main.yml b/test/integration/targets/azure_rm_devtestlab/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_devtestlab/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_devtestlab/tasks/main.yml b/test/integration/targets/azure_rm_devtestlab/tasks/main.yml
deleted file mode 100644
index e8f8b5b284..0000000000
--- a/test/integration/targets/azure_rm_devtestlab/tasks/main.yml
+++ /dev/null
@@ -1,766 +0,0 @@
-- name: Prepare random number
- set_fact:
- lab_name: "lab{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- vn_name: "vn{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- vm_name: "vn{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- artifacts_name: myartifacts
- github_token: "{{ lookup('env','GITHUB_ACCESS_TOKEN') }}"
- run_once: yes
-
-- name: Create instance of Lab -- check mode
- azure_rm_devtestlab:
- resource_group: "{{ resource_group }}"
- name: "{{ lab_name }}"
- location: eastus
- storage_type: standard
- premium_data_disks: no
- check_mode: yes
- register: output
-- name: Check if the change was correctly reported
- assert:
- that:
- - output.changed
-
-- name: Create instance of Lab
- azure_rm_devtestlab:
- resource_group: "{{ resource_group }}"
- name: "{{ lab_name }}"
- location: eastus
- storage_type: standard
- premium_data_disks: no
- register: output_lab
-- name: Check if the change was correctly reported
- assert:
- that:
- - output_lab.changed
-
-- name: Create instance of Lab again
- azure_rm_devtestlab:
- resource_group: "{{ resource_group }}"
- name: "{{ lab_name }}"
- location: eastus
- storage_type: standard
- premium_data_disks: no
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
-
-- name: Update lab - premium_data_disks
- azure_rm_devtestlab:
- resource_group: "{{ resource_group }}"
- name: "{{ lab_name }}"
- location: eastus
- storage_type: standard
- premium_data_disks: yes
- register: output
-- name: Assert the change was registered
- assert:
- that:
- - output.changed
-
-- name: List DevTest Labs in a resource group
- azure_rm_devtestlab_facts:
- resource_group: "{{ resource_group }}"
- register: output_lab
-- name: Assert that facts are returned
- assert:
- that:
- - output_lab.changed == False
- - output_lab.labs[0]['id'] != None
- - output_lab.labs[0]['resource_group'] != None
- - output_lab.labs[0]['name'] != None
- - output_lab.labs[0]['location'] != None
- - output_lab.labs[0]['storage_type'] != None
- - output_lab.labs[0]['premium_data_disks'] != None
- - output_lab.labs[0]['provisioning_state'] != None
- - output_lab.labs[0]['vault_name'] != None
-
-- name: Get DevTest Lab facts
- azure_rm_devtestlab_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ lab_name }}"
- register: output_lab
-- name: Assert that facts are returned
- assert:
- that:
- - output_lab.changed == False
- - output_lab.labs[0]['id'] != None
- - output_lab.labs[0]['resource_group'] != None
- - output_lab.labs[0]['name'] != None
- - output_lab.labs[0]['location'] != None
- - output_lab.labs[0]['storage_type'] != None
- - output_lab.labs[0]['premium_data_disks'] != None
- - output_lab.labs[0]['provisioning_state'] != None
- - output_lab.labs[0]['artifacts_storage_account'] != None
- - output_lab.labs[0]['default_premium_storage_account'] != None
- - output_lab.labs[0]['default_storage_account'] != None
- - output_lab.labs[0]['premium_data_disk_storage_account'] != None
- - output_lab.labs[0]['vault_name'] != None
-
-# azure_rm_devtestlabpolicy
-- name: Create instance of DevTest Lab Policy
- azure_rm_devtestlabpolicy:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- policy_set_name: default
- name: myDtlPolicy
- fact_name: user_owned_lab_vm_count
- threshold: 5
- register: output
-- debug:
- var: output
-- name: Assert the change was correctly reported
- assert:
- that:
- - output.changed
-
-- name: Create instance of DevTest Lab Policy -- idempotent
- azure_rm_devtestlabpolicy:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- policy_set_name: default
- name: myDtlPolicy
- fact_name: user_owned_lab_vm_count
- threshold: 5
- register: output
-- debug:
- var: output
-- name: Assert no change was reported
- assert:
- that:
- - not output.changed
-
-- name: Create instance of DevTest Lab Policy -- change value
- azure_rm_devtestlabpolicy:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- policy_set_name: default
- name: myDtlPolicy
- fact_name: user_owned_lab_vm_count
- threshold: 6
- register: output
-- debug:
- var: output
-- name: Assert the change was correctly reported
- assert:
- that:
- - output.changed
-
-- name: Delete instance of DevTest Lab Policy
- azure_rm_devtestlabpolicy:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- policy_set_name: default
- name: myDtlPolicy
- state: absent
- register: output
-- debug:
- var: output
-- name: Assert the change was correctly reported
- assert:
- that:
- - output.changed
-
-# azure_rm_devtestlabschedule
-- name: Create instance of DevTest Lab Schedule
- azure_rm_devtestlabschedule:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: lab_vms_shutdown
- time: "1030"
- time_zone_id: "UTC+12"
- register: output
-- debug:
- var: output
-- name: Assert the change was correctly reported
- assert:
- that:
- - output.changed
-
-- name: Update instance of DevTest Lab Schedule -- idempotent
- azure_rm_devtestlabschedule:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: lab_vms_shutdown
- time: "1030"
- time_zone_id: "UTC+12"
- register: output
-- debug:
- var: output
-- name: Assert no change was reported
- assert:
- that:
- - not output.changed
-
-- name: Update instance of DevTest Lab Schedule -- change time
- azure_rm_devtestlabschedule:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: lab_vms_shutdown
- time: "1130"
- time_zone_id: "UTC+12"
- register: output
-- debug:
- var: output
-- name: Assert the change was correctly reported
- assert:
- that:
- - output.changed
-
-- name: Delete instance of DevTest Lab Schedule
- azure_rm_devtestlabschedule:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: lab_vms_shutdown
- state: absent
- register: output
-- debug:
- var: output
-- name: Assert the change was correctly reported
- assert:
- that:
- - output.changed
-
-- name: Create instance of DevTest Labs virtual network
- azure_rm_devtestlabvirtualnetwork:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: "{{ vn_name }}"
- location: eastus
- description: My DevTest Lab
- register: output
-- name: Assert the change was registered
- assert:
- that:
- - output.changed
-
-- name: Update instance of DevTest Labs virtual network with same parameters
- azure_rm_devtestlabvirtualnetwork:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: "{{ vn_name }}"
- location: eastus
- description: My DevTest Lab
- register: output
-- name: Assert that nothing was changed
- assert:
- that:
- - output.changed == false
-
-- name: Update instance of DevTest Labs virtual network with changed description
- azure_rm_devtestlabvirtualnetwork:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: "{{ vn_name }}"
- location: eastus
- description: My DevTest Lab Updated
- register: output
-- name: Assert the change was registered
- assert:
- that:
- - output.changed
-
-- name: Get DevTest Lab Virtual Network facts
- azure_rm_devtestlabvirtualnetwork_facts:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: "{{ vn_name }}"
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.virtualnetworks[0]['id'] != None
- - output.virtualnetworks[0]['resource_group'] != None
- - output.virtualnetworks[0]['lab_name'] != None
- - output.virtualnetworks[0]['name'] != None
- - output.virtualnetworks[0]['external_provider_resource_id'] != None
- - output.virtualnetworks[0]['description'] != None
- - output.virtualnetworks[0]['provisioning_state'] != None
-
-- name: List all Virtual Networks in DevTest Lab
- azure_rm_devtestlabvirtualnetwork_facts:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.virtualnetworks[0]['id'] != None
- - output.virtualnetworks[0]['resource_group'] != None
- - output.virtualnetworks[0]['lab_name'] != None
- - output.virtualnetworks[0]['name'] != None
- - output.virtualnetworks[0]['external_provider_resource_id'] != None
- - output.virtualnetworks[0]['description'] != None
- - output.virtualnetworks[0]['provisioning_state'] != None
-
-- name: Create instance of DevTest Labs artifacts source
- azure_rm_devtestlabartifactsource:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: "{{ artifacts_name }}"
- uri: https://github.com/Azure/azure_preview_modules.git
- source_type: github
- folder_path: /tasks
- security_token: "{{ github_token }}"
- register: output
- when: "github_token | length > 0"
-- name: Assert if the change was correctly reported
- assert:
- that:
- - output.changed
- when: "github_token | length > 0"
-
-- name: Update instance of DevTest Labs artifacts source with same parameters
- azure_rm_devtestlabartifactsource:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: "{{ artifacts_name }}"
- uri: https://github.com/Azure/azure_preview_modules.git
- source_type: github
- folder_path: /tasks
- security_token: "{{ github_token }}"
- register: output
- when: "github_token | length > 0"
-- name: Assert that nothing was changed
- assert:
- that:
- - output.changed == false
- when: "github_token | length > 0"
-
-- name: Update instance of DevTest Labs artifacts source, add display name, change folder
- azure_rm_devtestlabartifactsource:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: "{{ artifacts_name }}"
- uri: https://github.com/Azure/azure_preview_modules.git
- source_type: github
- folder_path: /library
- security_token: "{{ github_token }}"
- display_name: My Artifacts Source
- register: output
- when: "github_token | length > 0"
-- name: Assert the change was registered
- assert:
- that:
- - output.changed
- when: "github_token | length > 0"
-
-- name: Set artifact source when a GitHub token is available
- set_fact:
- artifact_source:
- - source_name: "public repo"
- source_path: "/Artifacts/linux-install-mongodb"
- when: "github_token | length > 0"
-
-- name: Clear artifact source when no GitHub token is available
- set_fact:
- artifact_source: null
- when: "github_token | length == 0"
-
-- name: Create instance of DTL Virtual Machine
- azure_rm_devtestlabvirtualmachine:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: "{{ vm_name }}"
- notes: Virtual machine notes, just something....
- os_type: linux
- vm_size: Standard_A2_v2
- user_name: dtladmin
- password: ZSasfovobocu$$21!
- lab_subnet:
- virtual_network_name: "{{ vn_name }}"
- name: "{{ vn_name }}Subnet"
- disallow_public_ip_address: no
- image:
- offer: UbuntuServer
- publisher: Canonical
- sku: 16.04-LTS
- os_type: Linux
- version: latest
- artifacts: "{{ artifact_source }}"
- allow_claim: no
- expiration_date: "2029-02-22T01:49:12.117974Z"
- register: output
- when: "github_token | length > 0"
-
-- name: Assert that change was registered
- assert:
- that:
- - output.changed
- when: "github_token | length > 0"
-
-- name: Update instance of DTL Virtual Machine with same parameters
- azure_rm_devtestlabvirtualmachine:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: "{{ vm_name }}"
- notes: Virtual machine notes, just something....
- os_type: linux
- vm_size: Standard_A2_v2
- user_name: dtladmin
- password: ZSasfovobocu$$21!
- lab_subnet:
- virtual_network_name: "{{ vn_name }}"
- name: "{{ vn_name }}Subnet"
- disallow_public_ip_address: no
- image:
- offer: UbuntuServer
- publisher: Canonical
- sku: 16.04-LTS
- os_type: Linux
- version: latest
- artifacts: "{{ artifact_source }}"
- allow_claim: no
- expiration_date: "2029-02-22T01:49:12.117974Z"
- register: output
- when: "github_token | length > 0"
-
-- name: Assert that nothing has changed
- assert:
- that:
- - output.changed == false
- when: "github_token | length > 0"
-
-- name: Update instance of DTL Virtual Machine - change notes
- azure_rm_devtestlabvirtualmachine:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: "{{ vm_name }}"
- notes: Virtual machine notes, just something.... more text
- os_type: linux
- vm_size: Standard_A2_v2
- user_name: dtladmin
- password: ZSasfovobocu$$21!
- lab_subnet:
- virtual_network_name: "{{ vn_name }}"
- name: "{{ vn_name }}Subnet"
- disallow_public_ip_address: no
- image:
- offer: UbuntuServer
- publisher: Canonical
- sku: 16.04-LTS
- os_type: Linux
- version: latest
- artifacts: "{{ artifact_source }}"
- allow_claim: no
- expiration_date: "2029-02-22T01:49:12.117974Z"
- register: output
- when: "github_token | length > 0"
-
-- name: Assert that change was registered
- assert:
- that:
- - output.changed
- when: "github_token | length > 0"
-
-- name: Get Facts of DTL Virtual Machine
- azure_rm_devtestlabvirtualmachine_facts:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: "{{ vm_name }}"
- register: output_vm
-- name: Assert that facts are returned
- assert:
- that:
- - output_vm.changed == False
- - output_vm.virtualmachines[0]['id'] != None
- - output_vm.virtualmachines[0]['resource_group'] != None
- - output_vm.virtualmachines[0]['lab_name'] != None
- - output_vm.virtualmachines[0]['name'] != None
- - output_vm.virtualmachines[0]['compute_vm_id'] != None
- - output_vm.virtualmachines[0]['compute_vm_resource_group'] != None
- - output_vm.virtualmachines[0]['compute_vm_name'] != None
- - output_vm.virtualmachines[0]['disallow_public_ip_address'] != None
- - output_vm.virtualmachines[0]['expiration_date'] != None
- - output_vm.virtualmachines[0]['fqdn'] != None
- - output_vm.virtualmachines[0]['id'] != None
- - output_vm.virtualmachines[0]['image'] != None
- - output_vm.virtualmachines[0]['notes'] != None
- - output_vm.virtualmachines[0]['os_type'] != None
- - output_vm.virtualmachines[0]['provisioning_state'] != None
- - output_vm.virtualmachines[0]['storage_type'] != None
- - output_vm.virtualmachines[0]['user_name'] != None
- - output_vm.virtualmachines[0]['vm_size'] != None
- when: "github_token | length > 0"
-
-- name: List Facts of DTL Virtual Machine
- azure_rm_devtestlabvirtualmachine_facts:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- register: output_vm
-- name: Assert that facts are returned
- assert:
- that:
- - output_vm.changed == False
- - output_vm.virtualmachines[0]['id'] != None
- - output_vm.virtualmachines[0]['resource_group'] != None
- - output_vm.virtualmachines[0]['lab_name'] != None
- - output_vm.virtualmachines[0]['name'] != None
- - output_vm.virtualmachines[0]['compute_vm_id'] != None
- - output_vm.virtualmachines[0]['disallow_public_ip_address'] != None
- - output_vm.virtualmachines[0]['expiration_date'] != None
- - output_vm.virtualmachines[0]['fqdn'] != None
- - output_vm.virtualmachines[0]['id'] != None
- - output_vm.virtualmachines[0]['image'] != None
- - output_vm.virtualmachines[0]['notes'] != None
- - output_vm.virtualmachines[0]['os_type'] != None
- - output_vm.virtualmachines[0]['provisioning_state'] != None
- - output_vm.virtualmachines[0]['storage_type'] != None
- - output_vm.virtualmachines[0]['user_name'] != None
- - output_vm.virtualmachines[0]['vm_size'] != None
- when: "github_token | length > 0"
-
-
-- name: List all artifact sources
- azure_rm_devtestlabartifactsource_facts:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.artifactsources[0]['id'] != None
- - output.artifactsources[0]['resource_group'] != None
- - output.artifactsources[0]['lab_name'] != None
- - output.artifactsources[0]['name'] != None
- - output.artifactsources[0]['display_name'] != None
- - output.artifactsources[0]['source_type'] != None
- - output.artifactsources[0]['is_enabled'] != None
- - output.artifactsources[0]['uri'] != None
- - output.artifactsources[0]['folder_path'] != None
- - output.artifactsources[0]['provisioning_state'] != None
- - output.artifactsources | length >= 2
-
-- name: Get artifacts source facts
- azure_rm_devtestlabartifactsource_facts:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: public repo
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.artifactsources[0]['id'] != None
- - output.artifactsources[0]['resource_group'] != None
- - output.artifactsources[0]['lab_name'] != None
- - output.artifactsources[0]['name'] != None
- - output.artifactsources[0]['display_name'] != None
- - output.artifactsources[0]['source_type'] != None
- - output.artifactsources[0]['is_enabled'] != None
- - output.artifactsources[0]['uri'] != None
- - output.artifactsources[0]['folder_path'] != None
- - output.artifactsources[0]['provisioning_state'] != None
-
-- name: Delete instance of DevTest Labs artifacts source
- azure_rm_devtestlabartifactsource:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: "{{ artifacts_name }}"
- state: absent
- register: output
- when: "github_token | length > 0"
-- name: Assert that change was correctly registered
- assert:
- that:
- - output.changed
- when: "github_token | length > 0"
-
-- name: List ARM Template facts
- azure_rm_devtestlabarmtemplate_facts:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- artifact_source_name: "public environment repo"
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.armtemplates[0]['id'] != None
- - output.armtemplates[0]['resource_group'] != None
- - output.armtemplates[0]['lab_name'] != None
- - output.armtemplates[0]['artifact_source_name'] != None
- - output.armtemplates[0]['name'] != None
- - output.armtemplates[0]['display_name'] != None
- - output.armtemplates[0]['description'] != None
- - output.armtemplates[0]['publisher'] != None
- - "output.armtemplates | length > 1"
-
-- name: Get ARM Template facts
- azure_rm_devtestlabarmtemplate_facts:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- artifact_source_name: "public environment repo"
- name: ServiceFabric-LabCluster
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.armtemplates[0]['id'] != None
- - output.armtemplates[0]['resource_group'] != None
- - output.armtemplates[0]['lab_name'] != None
- - output.armtemplates[0]['artifact_source_name'] != None
- - output.armtemplates[0]['name'] != None
- - output.armtemplates[0]['display_name'] != None
- - output.armtemplates[0]['description'] != None
- - output.armtemplates[0]['publisher'] != None
- - "output.armtemplates | length == 1"
-
-
-
-- name: Get Artifact facts
- azure_rm_devtestlabartifact_facts:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- artifact_source_name: "public repo"
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.artifacts[0]['id'] != None
- - output.artifacts[0]['resource_group'] != None
- - output.artifacts[0]['lab_name'] != None
- - output.artifacts[0]['artifact_source_name'] != None
- - output.artifacts[0]['name'] != None
- - output.artifacts[0]['description'] != None
- - output.artifacts[0]['file_path'] != None
- - output.artifacts[0]['publisher'] != None
- - output.artifacts[0]['target_os_type'] != None
- - output.artifacts[0]['publisher'] != None
- - "output.artifacts | length > 1"
-
-- name: Get Artifact facts
- azure_rm_devtestlabartifact_facts:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- artifact_source_name: "public repo"
- name: windows-webdeploy
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.artifacts[0]['id'] != None
- - output.artifacts[0]['resource_group'] != None
- - output.artifacts[0]['lab_name'] != None
- - output.artifacts[0]['artifact_source_name'] != None
- - output.artifacts[0]['name'] != None
- - output.artifacts[0]['description'] != None
- - output.artifacts[0]['file_path'] != None
- - output.artifacts[0]['publisher'] != None
- - output.artifacts[0]['target_os_type'] != None
- - output.artifacts[0]['publisher'] != None
- - "output.artifacts | length == 1"
-
-- name: Create instance of DevTest Lab Environment
- azure_rm_devtestlabenvironment:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- user_name: "@me"
- name: myEnvironment
- location: eastus
- deployment_template: "{{ output_lab.labs[0].id }}/artifactSources/public environment repo/armTemplates/WebApp"
- register: output
-- name: Assert that the change was correctly reported
- assert:
- that:
- - output.changed
- when: "github_token | length > 0"
-
-- name: Create instance of DevTest Lab Environment - idempotent
- azure_rm_devtestlabenvironment:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- user_name: "@me"
- name: myEnvironment
- location: eastus
- deployment_template:
- artifact_source_name: public environment repo
- name: WebApp
- register: output
-- name: Assert that no change was detected
- assert:
- that:
- - not output.changed
- when: "github_token | length > 0"
-
-- name: Delete instance of DevTest Lab Environment
- azure_rm_devtestlabenvironment:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- user_name: "@me"
- name: myEnvironment
- state: absent
- register: output
-- name: Assert that change was detected
- assert:
- that:
- - output.changed
- when: "github_token | length > 0"
-
-- name: Create instance of DevTest Lab Image
- azure_rm_devtestlabcustomimage:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: myImage
- source_vm: "{{ output_vm.virtualmachines[0]['name'] }}"
- linux_os_state: non_deprovisioned
- register: output
-- name: Assert that change was detected
- assert:
- that:
- - output.changed
- when: "github_token | length > 0"
-
-- name: Create instance of DevTest Lab Image -- idempotent
- azure_rm_devtestlabcustomimage:
- resource_group: "{{ resource_group }}"
- lab_name: "{{ lab_name }}"
- name: myImage
- source_vm: "{{ output_vm.virtualmachines[0]['name'] }}"
- linux_os_state: non_deprovisioned
- register: output
-- name: Assert that no change was detected
- assert:
- that:
- - not output.changed
- when: "github_token | length > 0"
-
-- name: Delete instance of Lab -- check mode
- azure_rm_devtestlab:
- resource_group: "{{ resource_group }}"
- state: absent
- name: "{{ lab_name }}"
- check_mode: yes
- register: output
-- name: Assert that the change was correctly reported in check mode
- assert:
- that:
- - output.changed
-
-- name: Delete instance of Lab
- azure_rm_devtestlab:
- resource_group: "{{ resource_group }}"
- name: "{{ lab_name }}"
- state: absent
- register: output
-- name: Assert the change was correctly reported
- assert:
- that:
- - output.changed
-
-- name: Delete non-existent instance of Lab
- azure_rm_devtestlab:
- resource_group: "{{ resource_group }}"
- name: "{{ lab_name }}unexisting"
- state: absent
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
diff --git a/test/integration/targets/azure_rm_dnszone/aliases b/test/integration/targets/azure_rm_dnszone/aliases
deleted file mode 100644
index b048b01fe2..0000000000
--- a/test/integration/targets/azure_rm_dnszone/aliases
+++ /dev/null
@@ -1,6 +0,0 @@
-cloud/azure
-shippable/azure/group2
-destructive
-azure_rm_dnszone_info
-azure_rm_dnsrecordset
-azure_rm_dnsrecordset_info
diff --git a/test/integration/targets/azure_rm_dnszone/meta/main.yml b/test/integration/targets/azure_rm_dnszone/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_dnszone/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_dnszone/tasks/main.yml b/test/integration/targets/azure_rm_dnszone/tasks/main.yml
deleted file mode 100644
index 03c76397c1..0000000000
--- a/test/integration/targets/azure_rm_dnszone/tasks/main.yml
+++ /dev/null
@@ -1,355 +0,0 @@
-- name: Create random domain name
- set_fact:
- domain_name: "{{ resource_group | hash('md5') | truncate(16, True, '') + (65535 | random | string) }}"
-
-- name: Create a DNS zone (check mode)
- azure_rm_dnszone:
- resource_group: "{{ resource_group }}"
- name: "{{ domain_name }}.com"
- register: results
- check_mode: yes
-
-- assert:
- that: results.changed
-
-- name: Create a DNS zone
- azure_rm_dnszone:
- resource_group: "{{ resource_group }}"
- name: "{{ domain_name }}.com"
- register: results
-
-- assert:
- that: results.changed
-
-- name: Update DNS zone with tags
- azure_rm_dnszone:
- resource_group: "{{ resource_group }}"
- name: "{{ domain_name }}.com"
- tags:
- test: modified
- register: results
-
-- assert:
- that:
- - results.changed
- - results.state.tags.test == 'modified'
-
-- name: Retrieve DNS Zone Facts
- azure_rm_dnszone_info:
- resource_group: "{{ resource_group }}"
- name: "{{ domain_name }}.com"
- register: zones
-
-- name: Assert that facts module returned result
- assert:
- that:
- - zones.dnszones[0].tags.test == 'modified'
- - zones.dnszones[0].type == 'public'
-
-- name: Create virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: "{{ item }}"
- address_prefixes_cidr:
- - 10.1.0.0/16
- - 172.100.0.0/16
- with_items:
- - "{{ domain_name }}registration1"
- - "{{ domain_name }}resolution1"
- - "{{ domain_name }}registration2"
- - "{{ domain_name }}resolution2"
-
-- name: Create private dns zone
- azure_rm_dnszone:
- name: "{{ domain_name }}.private"
- resource_group: "{{ resource_group }}"
- type: private
- registration_virtual_networks:
- - name: "{{ domain_name }}registration1"
- resolution_virtual_networks:
- - name: "{{ domain_name }}resolution1"
- - name: "{{ domain_name }}resolution2"
- register: results
-
-- assert:
- that:
- - "results.state.registration_virtual_networks | length == 1"
- - "results.state.resolution_virtual_networks | length == 2"
- - results.state.type == 'private'
-
-- name: Update private dns zone
- azure_rm_dnszone:
- name: "{{ domain_name }}.private"
- resource_group: "{{ resource_group }}"
- type: private
- registration_virtual_networks:
- - name: "{{ domain_name }}registration1"
- resolution_virtual_networks:
- - name: "{{ domain_name }}resolution1"
- register: results
-
-- assert:
- that:
- - "results.state.registration_virtual_networks | length == 1"
- - "results.state.resolution_virtual_networks | length == 1"
- - results.state.type == 'private'
-
-- name: Test idempotent
- azure_rm_dnszone:
- name: "{{ item }}"
- resource_group: "{{ resource_group }}"
- with_items:
- - "{{ domain_name }}.com"
- - "{{ domain_name }}.private"
- register: results
-
-- assert:
- that:
-      - not item.changed
- with_items: "{{ results.results }}"
-
-#
-# azure_rm_dnsrecordset test
-#
-
-- name: create "A" record set with multiple records
- azure_rm_dnsrecordset:
- resource_group: "{{ resource_group }}"
- relative_name: www
- zone_name: "{{ domain_name }}.com"
- record_type: A
- records:
- - entry: 192.168.100.101
- - entry: 192.168.100.102
- - entry: 192.168.100.103
- register: results
-
-- name: Assert that A record set was created
- assert:
- that:
- - results.changed
- - 'results.state.arecords | length == 3'
-
-- name: re-run "A" record with same values
- azure_rm_dnsrecordset:
- resource_group: "{{ resource_group }}"
- relative_name: www
- zone_name: "{{ domain_name }}.com"
- record_type: A
- records:
- - entry: 192.168.100.101
- - entry: 192.168.100.102
- - entry: 192.168.100.103
- register: results
-
-- name: Assert that A record set was not changed
- assert:
- that: not results.changed
-
-- name: Update "A" record set with additional record
- azure_rm_dnsrecordset:
- resource_group: "{{ resource_group }}"
- relative_name: www
- zone_name: "{{ domain_name }}.com"
- record_type: A
- record_mode: append
- records:
- - entry: 192.168.100.104
- register: results
-
-- name: Assert that new record was appended
- assert:
- that:
- - results.changed
- - 'results.state.arecords | length == 4'
-
-- name: re-update "A" record set with additional record
- azure_rm_dnsrecordset:
- resource_group: "{{ resource_group }}"
- relative_name: www
- zone_name: "{{ domain_name }}.com"
- record_type: A
- record_mode: append
- records:
- - entry: 192.168.100.104
- register: results
-
-- name: Assert that A record set was not changed
- assert:
- that:
- - not results.changed
-
-- name: Remove 1 record from record set
- azure_rm_dnsrecordset:
- resource_group: "{{ resource_group }}"
- relative_name: www
- zone_name: "{{ domain_name }}.com"
- record_type: A
- records:
- - entry: 192.168.100.101
- - entry: 192.168.100.102
- - entry: 192.168.100.103
- register: results
-
-- name: Assert that record was deleted
- assert:
- that:
- - results.changed
- - 'results.state.arecords | length == 3'
-
-- name: Check_mode test
- azure_rm_dnsrecordset:
- resource_group: "{{ resource_group }}"
- relative_name: www
- zone_name: "{{ domain_name }}.com"
- record_type: A
- records:
- - entry: 192.168.100.105
- check_mode: yes
- register: results
-
-- name: Assert that check_mode returns new state
- assert:
- that:
- - results.changed
-
-# FUTURE: add facts module calls to ensure that we really didn't touch anything
-
-- name: create SRV records in a new record set
- azure_rm_dnsrecordset:
- resource_group: "{{ resource_group }}"
- relative_name: "_sip._tcp.{{ domain_name }}.com"
- zone_name: "{{ domain_name }}.com"
- time_to_live: 7200
- record_type: SRV
- records:
- - entry: sip.{{ domain_name }}.com
- priority: 20
- weight: 10
- port: 5060
- register: results
-
-- name: Assert that SRV record set was created
- assert:
- that:
- - results.changed
-
-- name: create TXT records in a new record set
- azure_rm_dnsrecordset:
- resource_group: "{{ resource_group }}"
- relative_name: "_txt.{{ domain_name }}.com"
- zone_name: "{{ domain_name }}.com"
- record_type: TXT
- records:
- - entry: "v=spf1 a -all"
- - entry: "foo"
- - entry:
- - "bar"
- - "baz"
- register: results
-
-- name: Assert that TXT record set was created
- assert:
- that:
- - results.changed
-
-#
-# azure_rm_dnsrecordset_info
-#
-
-- name: Retrieve DNS Record Set Facts for single Record Set
- azure_rm_dnsrecordset_info:
- resource_group: "{{ resource_group }}"
- zone_name: "{{ domain_name }}.com"
- relative_name: www
- record_type: A
- register: results
-
-- name: Assert that facts module returned result for single Record Set
- assert:
- that:
- - not results.changed
-# - azure_dnsrecordset[0].name == 'www'
- - results.dnsrecordsets[0].relative_name == 'www'
- - 'results.dnsrecordsets[0].records | length == 3'
- - results.dnsrecordsets[0].record_type == 'A'
-
-- name: Retrieve DNS Record Set Facts for all Record Sets
- azure_rm_dnsrecordset_info:
- resource_group: "{{ resource_group }}"
- zone_name: "{{ domain_name }}.com"
- register: facts
-
-- name: Assert that facts module returned result for all Record Sets
- assert:
- that:
- - not facts.changed
-# - facts.ansible_facts.azure_dnsrecordset[0].name == '@'
-# - facts.ansible_facts.azure_dnsrecordset[1].name == '@'
-# - facts.ansible_facts.azure_dnsrecordset[4].name == 'www'
-
-#
-# azure_rm_dnsrecordset cleanup
-#
-- name: delete all record sets except for @
- azure_rm_dnsrecordset:
- resource_group: "{{ resource_group }}"
- relative_name: "{{ item.relative_name }}"
- zone_name: "{{ domain_name }}.com"
- record_type: "{{ item.record_type }}"
- state: absent
- with_items: "{{ facts.dnsrecordsets }}"
- when:
- - item.relative_name != '@'
- register: results
-
-- name: Assert that the record sets were deleted
- assert:
- that: results.changed
-
-- name: Retrieve DNS Record Set Facts for all Record Sets
- azure_rm_dnsrecordset_info:
- resource_group: "{{ resource_group }}"
- zone_name: "{{ domain_name }}.com"
- register: facts
-
-- name: Assert all record sets were deleted
- assert:
- that:
- - item.relative_name == '@'
- with_items: "{{ facts.dnsrecordsets }}"
-
-- name: (idempotence test) re-run record set absent
- azure_rm_dnsrecordset:
- resource_group: "{{ resource_group }}"
- relative_name: www
- zone_name: "{{ domain_name }}.com"
- record_type: A
- state: absent
- register: results
-
-- name: Assert that the record set removal is idempotent
- assert:
- that: not results.changed
-
-#
-# azure_rm_dnszone cleanup
-#
-- name: Delete DNS zone
- azure_rm_dnszone:
- resource_group: "{{ resource_group }}"
- name: "{{ item }}"
- state: absent
- with_items:
- - "{{ domain_name }}.com"
- - "{{ domain_name }}.private"
-
-- name: Delete DNS zone (idempotent)
- azure_rm_dnszone:
- resource_group: "{{ resource_group }}"
- name: "{{ domain_name }}.com"
- state: absent
- register: results
-
-- assert:
- that: not results.changed \ No newline at end of file
diff --git a/test/integration/targets/azure_rm_functionapp/aliases b/test/integration/targets/azure_rm_functionapp/aliases
deleted file mode 100644
index aa77c071a8..0000000000
--- a/test/integration/targets/azure_rm_functionapp/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/azure
-shippable/azure/group2
-destructive
diff --git a/test/integration/targets/azure_rm_functionapp/meta/main.yml b/test/integration/targets/azure_rm_functionapp/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_functionapp/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_functionapp/tasks/main.yml b/test/integration/targets/azure_rm_functionapp/tasks/main.yml
deleted file mode 100644
index cf200fb9c4..0000000000
--- a/test/integration/targets/azure_rm_functionapp/tasks/main.yml
+++ /dev/null
@@ -1,131 +0,0 @@
-- name: Fix resource prefix
- set_fact:
- fixed_resource_prefix: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
-
-- name: Set resource names from the prefix
- set_fact:
- funcapp_name_basic: "fa{{ fixed_resource_prefix }}basic"
- funcapp_name_container: "fa{{ fixed_resource_prefix }}container"
- funcapp_name_params: "fa{{ fixed_resource_prefix }}params"
- storage_account_name: "sa{{ fixed_resource_prefix }}"
- plan_name: "ap{{ fixed_resource_prefix }}"
-
-- name: create storage account for function apps
- azure_rm_storageaccount:
- resource_group: '{{ resource_group }}'
- name: "{{ storage_account_name }}"
- account_type: Standard_LRS
-
-- name: create basic function app
- azure_rm_functionapp:
- resource_group: "{{ resource_group }}"
- name: "{{ funcapp_name_basic }}"
- storage_account: "{{ storage_account_name }}"
- register: output
-
-- name: assert the function was created
- assert:
- that: output.changed
-
-- name: list facts for function
- azure_rm_functionapp_info:
- resource_group: '{{ resource_group }}'
- name: "{{ funcapp_name_basic }}"
- register: results
-
-- name: assert the facts were retrieved
- assert:
- that:
- - results.ansible_info.azure_functionapps|length == 1
- - results.ansible_info.azure_functionapps[0].name == "{{ funcapp_name_basic }}"
-
-- name: delete basic function app
- azure_rm_functionapp:
- resource_group: '{{ resource_group }}'
- name: "{{ funcapp_name_basic }}"
- state: absent
- register: output
-
-- name: assert the function was deleted
- assert:
- that: output.changed
-
-- name: create a function with app settings
- azure_rm_functionapp:
- resource_group: '{{ resource_group }}'
- name: "{{ funcapp_name_params }}"
- storage_account: "{{ storage_account_name }}"
- app_settings:
- hello: world
- things: more stuff
- FUNCTIONS_EXTENSION_VERSION: "~2"
- register: output
-
-- name: assert the function with app settings was created
- assert:
- that: output.changed
-
-- name: change app settings
- azure_rm_functionapp:
- resource_group: '{{ resource_group }}'
- name: "{{ funcapp_name_params }}"
- storage_account: "{{ storage_account_name }}"
- app_settings:
- hello: world
- things: more stuff
- FUNCTIONS_EXTENSION_VERSION: "~2"
- another: one
- register: output
-
-- name: assert the function was changed
- assert:
- that: output.changed
-
-- name: delete the function app
- azure_rm_functionapp:
- resource_group: '{{ resource_group }}'
- name: "{{ funcapp_name_params }}"
- state: absent
- register: output
-
-- name: assert the function was deleted
- assert:
- that: output.changed
-
-- name: Create a linux app service plan
- azure_rm_appserviceplan:
- resource_group: "{{ resource_group }}"
- name: "{{ plan_name }}"
- sku: S1
- is_linux: true
- number_of_workers: 1
-
-- name: "Create azure function app {{ function_app }}"
- azure_rm_functionapp:
- resource_group: "{{ resource_group }}"
- name: "{{ funcapp_name_container }}"
- storage_account: "{{ storage_account_name }}"
- plan:
- resource_group: "{{ resource_group }}"
- name: "{{ plan_name }}"
- container_settings:
- name: httpd
- app_settings:
- FUNCTIONS_EXTENSION_VERSION: "~2"
- register: output
-
-- name: assert the function app was created
- assert:
- that: output.changed
-
-- name: delete the function app
- azure_rm_functionapp:
- resource_group: '{{ resource_group }}'
- name: "{{ funcapp_name_container }}"
- state: absent
-
-- name: delete storage account
- azure_rm_storageaccount:
- resource_group: '{{ resource_group }}'
- name: "{{ storage_account_name }}"
- state: absent
diff --git a/test/integration/targets/azure_rm_gallery/aliases b/test/integration/targets/azure_rm_gallery/aliases
deleted file mode 100644
index df49fa1efc..0000000000
--- a/test/integration/targets/azure_rm_gallery/aliases
+++ /dev/null
@@ -1,6 +0,0 @@
-cloud/azure
-shippable/azure/group4
-destructive
-azure_rm_galleryimage
-azure_rm_galleryimageversion
-azure_rm_snapshot
diff --git a/test/integration/targets/azure_rm_gallery/meta/main.yml b/test/integration/targets/azure_rm_gallery/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_gallery/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_gallery/tasks/main.yml b/test/integration/targets/azure_rm_gallery/tasks/main.yml
deleted file mode 100644
index 000f9e8855..0000000000
--- a/test/integration/targets/azure_rm_gallery/tasks/main.yml
+++ /dev/null
@@ -1,342 +0,0 @@
-- name: Prepare random number
- set_fact:
- rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-- name: Create virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: testVnet
- address_prefixes: "10.0.0.0/16"
-
-- name: Add subnet
- azure_rm_subnet:
- resource_group: "{{ resource_group }}"
- name: testSubnet
- address_prefix: "10.0.1.0/24"
- virtual_network: testVnet
-
-- name: Create public IP address
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- allocation_method: Static
- name: testPublicIP
-
-- name: Create virtual network interface card for the VM
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "vmforimage{{ rpfx }}nic"
- virtual_network: testVnet
- subnet: testSubnet
-
-- name: Create VM
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "vmforimage{{ rpfx }}"
- location: eastus
- admin_username: testuser
- admin_password: "Password1234!"
- vm_size: Standard_B1ms
- network_interfaces: "vmforimage{{ rpfx }}nic"
- image:
- offer: UbuntuServer
- publisher: Canonical
- sku: 16.04-LTS
- version: latest
-
-- name: Get VM facts
- azure_rm_virtualmachine_facts:
- resource_group: "{{ resource_group }}"
- name: "vmforimage{{ rpfx }}"
- register: output
-
-- name: Create a snapshot by importing an unmanaged blob from the same subscription.
- azure_rm_snapshot:
- resource_group: "{{ resource_group }}"
- name: "mySnapshot-{{ rpfx }}"
- location: eastus
- creation_data:
- create_option: Import
- source_uri: 'https://{{ output.vms[0].storage_account_name }}.blob.core.windows.net/{{ output.vms[0].storage_container_name }}/{{ output.vms[0].storage_blob_name }}'
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Generalize VM
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "vmforimage{{ rpfx }}"
- generalized: yes
-- name: Create custom image
- azure_rm_image:
- resource_group: "{{ resource_group }}"
- name: testimagea
- source: "vmforimage{{ rpfx }}"
-- name: Create or update a simple gallery.
- azure_rm_gallery:
- resource_group: "{{ resource_group }}"
- name: myGallery{{ rpfx }}
- location: eastus
- description: This is the gallery description.
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Create or update a simple gallery - idempotent
- azure_rm_gallery:
- resource_group: "{{ resource_group }}"
- name: myGallery{{ rpfx }}
- location: eastus
- description: This is the gallery description.
- register: output
-
-- assert:
- that:
- - not output.changed
-
-- name: Create or update a simple gallery - change description
- azure_rm_gallery:
- resource_group: "{{ resource_group }}"
- name: myGallery{{ rpfx }}
- location: eastus
- description: This is the gallery description - xxx.
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Get a gallery info.
- azure_rm_gallery_info:
- resource_group: "{{ resource_group }}"
- name: myGallery{{ rpfx }}
- register: output
-
-- assert:
- that:
- - not output.changed
- - output.galleries['id'] != None
- - output.galleries['name'] != None
- - output.galleries['location'] != None
- - output.galleries['description'] != None
- - output.galleries['provisioning_state'] != None
-
-- name: Create or update gallery image
- azure_rm_galleryimage:
- resource_group: "{{ resource_group }}"
- gallery_name: myGallery{{ rpfx }}
- name: myImage
- location: eastus
- os_type: linux
- os_state: generalized
- identifier:
- publisher: myPublisherName
- offer: myOfferName
- sku: mySkuName
- description: Image Description
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Create or update gallery image - idempotent
- azure_rm_galleryimage:
- resource_group: "{{ resource_group }}"
- gallery_name: myGallery{{ rpfx }}
- name: myImage
- location: eastus
- os_type: linux
- os_state: generalized
- identifier:
- publisher: myPublisherName
- offer: myOfferName
- sku: mySkuName
- description: Image Description
- register: output
-
-- assert:
- that:
- - not output.changed
-
-- name: Create or update gallery image - change description
- azure_rm_galleryimage:
- resource_group: "{{ resource_group }}"
- gallery_name: myGallery{{ rpfx }}
- name: myImage
- location: eastus
- os_type: linux
- os_state: generalized
- identifier:
- publisher: myPublisherName
- offer: myOfferName
- sku: mySkuName
- description: Image Description XXXs
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Get a gallery image info.
- azure_rm_galleryimage_info:
- resource_group: "{{ resource_group }}"
- gallery_name: myGallery{{ rpfx }}
- name: myImage
- register: output
-
-- assert:
- that:
- - not output.changed
- - output.images['id'] != None
- - output.images['name'] != None
- - output.images['location'] != None
- - output.images['os_state'] != None
- - output.images['os_type'] != None
- - output.images['identifier'] != None
-
-- name: Create or update a simple gallery Image Version.
- azure_rm_galleryimageversion:
- resource_group: "{{ resource_group }}"
- gallery_name: myGallery{{ rpfx }}
- gallery_image_name: myImage
- name: 10.1.3
- location: eastus
- publishing_profile:
- end_of_life_date: "2020-10-01t00:00:00+00:00"
- exclude_from_latest: yes
- replica_count: 3
- storage_account_type: Standard_LRS
- target_regions:
- - name: eastus
- regional_replica_count: 1
- - name: westus
- regional_replica_count: 2
- storage_account_type: Standard_ZRS
- managed_image:
- name: testimagea
- resource_group: "{{ resource_group }}"
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Create or update a simple gallery Image Version - idempotent
- azure_rm_galleryimageversion:
- resource_group: "{{ resource_group }}"
- gallery_name: myGallery{{ rpfx }}
- gallery_image_name: myImage
- name: 10.1.3
- location: eastus
- publishing_profile:
- end_of_life_date: "2020-10-01t00:00:00+00:00"
- exclude_from_latest: yes
- replica_count: 3
- storage_account_type: Standard_LRS
- target_regions:
- - name: eastus
- regional_replica_count: 1
- - name: westus
- regional_replica_count: 2
- storage_account_type: Standard_ZRS
- managed_image:
- name: testimagea
- resource_group: "{{ resource_group }}"
- register: output
-
-- assert:
- that:
- - not output.changed
-
-- name: Create or update a simple gallery Image Version - change end of life
- azure_rm_galleryimageversion:
- resource_group: "{{ resource_group }}"
- gallery_name: myGallery{{ rpfx }}
- gallery_image_name: myImage
- name: 10.1.3
- location: eastus
- publishing_profile:
- end_of_life_date: "2021-10-01t00:00:00+00:00"
- exclude_from_latest: yes
- replica_count: 3
- storage_account_type: Standard_LRS
- target_regions:
- - name: eastus
- regional_replica_count: 1
- - name: westus
- regional_replica_count: 2
- storage_account_type: Standard_ZRS
- managed_image:
- name: testimagea
- resource_group: "{{ resource_group }}"
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Get a simple gallery Image Version info.
- azure_rm_galleryimageversion_info:
- resource_group: "{{ resource_group }}"
- gallery_name: myGallery{{ rpfx }}
- gallery_image_name: myImage
- name: 10.1.3
- register: output
-
-- assert:
- that:
- - not output.changed
- - output.versions['id'] != None
- - output.versions['name'] != None
- - output.versions['location'] != None
- - output.versions['publishing_profile'] != None
- - output.versions['provisioning_state'] != None
-
-- name: Delete gallery image Version.
- azure_rm_galleryimageversion:
- resource_group: "{{ resource_group }}"
- gallery_name: myGallery{{ rpfx }}
- gallery_image_name: myImage
- name: 10.1.3
- state: absent
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Pause for 2 minutes to wait for deletion to complete
- pause:
- minutes: 2
-
-- name: Delete gallery image
- azure_rm_galleryimage:
- resource_group: "{{ resource_group }}"
- gallery_name: myGallery{{ rpfx }}
- name: myImage
- state: absent
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Pause for 2 minutes to wait for deletion to complete
- pause:
- minutes: 2
-
-- name: Delete gallery
- azure_rm_gallery:
- resource_group: "{{ resource_group }}"
- name: myGallery{{ rpfx }}
- state: absent
- register: output
-
-- assert:
- that:
- - output.changed
diff --git a/test/integration/targets/azure_rm_hdinsightcluster/aliases b/test/integration/targets/azure_rm_hdinsightcluster/aliases
deleted file mode 100644
index ef98a7be22..0000000000
--- a/test/integration/targets/azure_rm_hdinsightcluster/aliases
+++ /dev/null
@@ -1,6 +0,0 @@
-cloud/azure
-destructive
-shippable/azure/group1
-unstable # test is slow (~30 minute run time), not unstable, but this is better than unsupported
-azure_rm_hdinsightcluster_info
-disabled
diff --git a/test/integration/targets/azure_rm_hdinsightcluster/meta/main.yml b/test/integration/targets/azure_rm_hdinsightcluster/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_hdinsightcluster/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_hdinsightcluster/tasks/main.yml b/test/integration/targets/azure_rm_hdinsightcluster/tasks/main.yml
deleted file mode 100644
index 2bf0a98191..0000000000
--- a/test/integration/targets/azure_rm_hdinsightcluster/tasks/main.yml
+++ /dev/null
@@ -1,244 +0,0 @@
-- name: Prepare random number
- set_fact:
- rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-- name: Create storage account
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "storage{{ rpfx }}"
- account_type: Standard_LRS
- location: eastus2
-
-- name: List storage account keys (Azure REST API StorageAccounts_ListKeys)
- azure_rm_resource:
- api_version: '2018-07-01'
- method: POST
- resource_group: "{{ resource_group }}"
- provider: storage
- resource_type: storageaccounts
- resource_name: "storage{{ rpfx }}"
- subresource:
- - type: listkeys
- register: storage_output
-
-- debug:
- var: storage_output
-
-- name: Create instance of Cluster -- check mode
- azure_rm_hdinsightcluster:
- resource_group: "{{ resource_group }}"
- name: "cluster{{ rpfx }}"
- location: eastus2
- cluster_version: 3.6
- os_type: linux
- tier: standard
- cluster_definition:
- kind: spark
- gateway_rest_username: http-user
- gateway_rest_password: MuABCPassword!!@123
- storage_accounts:
- - name: storage{{ rpfx }}.blob.core.windows.net
- is_default: yes
- container: "cluster{{ rpfx }}"
- key: "{{ storage_output['response']['keys'][0]['value'] }}"
- compute_profile_roles:
- - name: headnode
- target_instance_count: 1
- vm_size: Standard_D3
- linux_profile:
- username: sshuser
- password: MuABCPassword!!@123
- - name: workernode
- target_instance_count: 1
- vm_size: Standard_D3
- linux_profile:
- username: sshuser
- password: MuABCPassword!!@123
- - name: zookeepernode
- target_instance_count: 3
- vm_size: Medium
- linux_profile:
- username: sshuser
- password: MuABCPassword!!@123
- check_mode: yes
- register: output
-- name: Assert the resource instance was created
- assert:
- that:
- - output.changed
-
-- name: Create instance of Cluster
- azure_rm_hdinsightcluster:
- resource_group: "{{ resource_group }}"
- name: "cluster{{ rpfx }}"
- location: eastus2
- cluster_version: 3.6
- os_type: linux
- tier: standard
- cluster_definition:
- kind: spark
- gateway_rest_username: http-user
- gateway_rest_password: MuABCPassword!!@123
- storage_accounts:
- - name: storage{{ rpfx }}.blob.core.windows.net
- is_default: yes
- container: "cluster{{ rpfx }}"
- key: "{{ storage_output['response']['keys'][0]['value'] }}"
- compute_profile_roles:
- - name: headnode
- target_instance_count: 1
- vm_size: Standard_D3
- linux_profile:
- username: sshuser
- password: MuABCPassword!!@123
- - name: workernode
- target_instance_count: 1
- vm_size: Standard_D3
- linux_profile:
- username: sshuser
- password: MuABCPassword!!@123
- - name: zookeepernode
- target_instance_count: 3
- vm_size: Medium
- linux_profile:
- username: sshuser
- password: MuABCPassword!!@123
- register: output
-
-- name: Assert the resource instance was created
- assert:
- that:
- - output.changed
-
-- name: Create instance of Cluster again -- idempotent
- azure_rm_hdinsightcluster:
- resource_group: "{{ resource_group }}"
- name: "cluster{{ rpfx }}"
- location: eastus2
- cluster_version: 3.6
- os_type: linux
- tier: standard
- cluster_definition:
- kind: spark
- gateway_rest_username: http-user
- gateway_rest_password: MuABCPassword!!@123
- storage_accounts:
- - name: storage{{ rpfx }}.blob.core.windows.net
- is_default: yes
- container: "cluster{{ rpfx }}"
- key: "{{ storage_output['response']['keys'][0]['value'] }}"
- compute_profile_roles:
- - name: headnode
- target_instance_count: 1
- vm_size: Standard_D3
- linux_profile:
- username: sshuser
- password: MuABCPassword!!@123
- - name: workernode
- target_instance_count: 1
- vm_size: Standard_D3
- linux_profile:
- username: sshuser
- password: MuABCPassword!!@123
- - name: zookeepernode
- target_instance_count: 3
- vm_size: Medium
- linux_profile:
- username: sshuser
- password: MuABCPassword!!@123
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
-
-- name: Create instance of Cluster again -- resize and add tags
- azure_rm_hdinsightcluster:
- resource_group: "{{ resource_group }}"
- name: "cluster{{ rpfx }}"
- location: eastus2
- cluster_version: 3.6
- os_type: linux
- tier: standard
- cluster_definition:
- kind: spark
- gateway_rest_username: http-user
- gateway_rest_password: MuABCPassword!!@123
- storage_accounts:
- - name: storage{{ rpfx }}.blob.core.windows.net
- is_default: yes
- container: "cluster{{ rpfx }}"
- key: "{{ storage_output['response']['keys'][0]['value'] }}"
- compute_profile_roles:
- - name: headnode
- target_instance_count: 1
- vm_size: Standard_D3
- linux_profile:
- username: sshuser
- password: MuABCPassword!!@123
- - name: workernode
- target_instance_count: 2
- vm_size: Standard_D3
- linux_profile:
- username: sshuser
- password: MuABCPassword!!@123
- - name: zookeepernode
- target_instance_count: 3
- vm_size: Medium
- linux_profile:
- username: sshuser
- password: MuABCPassword!!@123
- tags:
- aaa: bbb
- register: output
-- debug:
- var: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Get facts of Cluster
- azure_rm_hdinsightcluster_info:
- resource_group: "{{ resource_group }}"
- name: "cluster{{ rpfx }}"
- register: output
-
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.clusters[0]['id'] != None
- - output.clusters[0]['resource_group'] != None
- - output.clusters[0]['name'] != None
- - output.clusters[0]['location'] != None
- - output.clusters[0]['cluster_version'] != None
- - output.clusters[0]['os_type'] != None
- - output.clusters[0]['tier'] != None
- - output.clusters[0]['cluster_definition'] != None
- - output.clusters[0]['compute_profile_roles'] != None
- - output.clusters[0]['connectivity_endpoints'] != None
-
-- name: Delete instance of Cluster -- check mode
- azure_rm_hdinsightcluster:
- resource_group: "{{ resource_group }}"
- name: "cluster{{ rpfx }}"
- state: absent
- check_mode: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete instance of Cluster
- azure_rm_hdinsightcluster:
- resource_group: "{{ resource_group }}"
- name: "cluster{{ rpfx }}"
- state: absent
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
diff --git a/test/integration/targets/azure_rm_image/aliases b/test/integration/targets/azure_rm_image/aliases
deleted file mode 100644
index 2d7dea2cef..0000000000
--- a/test/integration/targets/azure_rm_image/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group3
-destructive
-azure_rm_image_info
diff --git a/test/integration/targets/azure_rm_image/meta/main.yml b/test/integration/targets/azure_rm_image/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_image/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_image/tasks/main.yml b/test/integration/targets/azure_rm_image/tasks/main.yml
deleted file mode 100644
index 9b68a92e7a..0000000000
--- a/test/integration/targets/azure_rm_image/tasks/main.yml
+++ /dev/null
@@ -1,171 +0,0 @@
-- name: Set resource names
- set_fact:
- vm_name: "vm{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}x"
- public_ip_name: "pip{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- security_group_name: "sg{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- empty_disk_name: "emptydisk{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
-
-- name: Create virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- address_prefixes: "10.10.0.0/16"
-
-- name: Add subnet
- azure_rm_subnet:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- address_prefix: "10.10.0.0/24"
- virtual_network: "{{ vm_name }}"
-
-- name: Create public ip
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- allocation_method: Static
- name: "{{ public_ip_name }}"
-
-- name: Create security group
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ security_group_name }}"
-
-- name: Create NIC
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- virtual_network: "{{ vm_name }}"
- subnet: "{{ vm_name }}"
- public_ip_name: "{{ public_ip_name }}"
- security_group: "{{ security_group_name }}"
-
-- name: Create virtual machine
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- vm_size: Standard_A0
- managed_disk_type: Standard_LRS
- admin_username: adminuser
- admin_password: Password123!
- os_type: Linux
- network_interfaces: "{{ vm_name }}"
- image:
- offer: UbuntuServer
- publisher: Canonical
- sku: 16.04-LTS
- version: latest
- register: vm
-
-- name: Create new empty managed disk
- azure_rm_manageddisk:
- resource_group: "{{ resource_group }}"
- name: "{{ empty_disk_name }}"
- storage_account_type: "Standard_LRS"
- disk_size_gb: 1
- register: emptydisk
-
-- name: Create an image from VM (check mode)
- azure_rm_image:
- resource_group: "{{ resource_group }}"
- source: "{{ vm.ansible_facts.azure_vm.properties.storageProfile.osDisk.managedDisk.id }}"
- name: testimage001
- os_type: Linux
- data_disk_sources:
- - "{{ empty_disk_name }}"
- check_mode: yes
- register: output
-
-- assert:
- that: output.changed
-
-- name: Create an image from VM
- azure_rm_image:
- resource_group: "{{ resource_group }}"
- source:
- name: "{{ vm_name }}"
- type: disks
- name: testimage001
- os_type: Linux
- register: output
-
-- assert:
- that:
- - output.changed
- - output.id
-
-- name: Create an image from VM (idempotent)
- azure_rm_image:
- resource_group: "{{ resource_group }}"
- source: "{{ vm.ansible_facts.azure_vm.properties.storageProfile.osDisk.managedDisk.id }}"
- name: testimage001
- os_type: Linux
- register: output
-
-- assert:
- that:
- - not output.changed
- - output.id
-
-- name: Gather information about the created image
- azure_rm_image_info:
- resource_group: "{{ resource_group }}"
- name: testimage001
- register: output
-
-- assert:
- that:
- - output.images != []
-
-- name: Delete image (check mode)
- azure_rm_image:
- resource_group: "{{ resource_group }}"
- name: testimage001
- state: absent
- register: output
- check_mode: yes
-
-- assert:
- that:
- - output.changed
-
-- name: Delete image
- azure_rm_image:
- resource_group: "{{ resource_group }}"
- name: testimage001
- state: absent
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Delete image (idempotent)
- azure_rm_image:
- resource_group: "{{ resource_group }}"
- name: testimage001
- state: absent
- register: output
-
-- assert:
- that:
- - not output.changed
-
-- name: Delete empty disk
- azure_rm_manageddisk:
- resource_group: "{{ resource_group }}"
- name: "{{ empty_disk_name }}"
- state: absent
-
-- name: Delete VM
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- state: absent
- vm_size: Standard_A0
- register: output
-
-- name: Delete public ip
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- allocation_method: Static
- name: "{{ public_ip_name }}"
- state: absent
diff --git a/test/integration/targets/azure_rm_iothub/aliases b/test/integration/targets/azure_rm_iothub/aliases
deleted file mode 100644
index b105765482..0000000000
--- a/test/integration/targets/azure_rm_iothub/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group2
-disabled # See: https://github.com/ansible/ansible/issues/61852
-destructive
diff --git a/test/integration/targets/azure_rm_iothub/meta/main.yml b/test/integration/targets/azure_rm_iothub/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_iothub/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_iothub/tasks/main.yml b/test/integration/targets/azure_rm_iothub/tasks/main.yml
deleted file mode 100644
index 6055ea1f85..0000000000
--- a/test/integration/targets/azure_rm_iothub/tasks/main.yml
+++ /dev/null
@@ -1,172 +0,0 @@
-- set_fact:
- rpfx: "{{ resource_group | hash('md5') | truncate(8, True, '') }}"
-
-- name: Create IoT Hub (check mode)
- azure_rm_iothub:
- name: "hub{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- ip_filters:
- - name: filter1
- action: reject
- ip_mask: 40.60.80.10
- check_mode: yes
- register: iothub
-
-- assert:
- that:
- - iothub.changed
-
-- name: Query IoT Hub
- azure_rm_iothub_info:
- name: "hub{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- register: iothub
- ignore_errors: yes
-
-- name: Create IoT Hub
- azure_rm_iothub:
- name: "hub{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- ip_filters:
- - name: filter1
- action: reject
- ip_mask: 40.60.80.10
- register: iothub
-
-- assert:
- that:
- - iothub.changed
-
-- name: Create IoT Hub (idempotent)
- azure_rm_iothub:
- name: "hub{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- ip_filters:
- - name: filter1
- action: reject
- ip_mask: 40.60.80.10
- register: iothub
-
-- assert:
- that:
- - not iothub.changed
-
-- name: Query IoT Hub
- azure_rm_iothub_info:
- name: "hub{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- list_keys: yes
- register: iothub
-
-- assert:
- that:
- - iothub.iothubs | length == 1
-
-- set_fact:
- registry_write_name: "{{ item.key_name }}"
- registry_write_key: "{{ item.primary_key }}"
- with_items: "{{ iothub.iothubs[0]['keys'] }}"
- when: item.rights == 'RegistryWrite, ServiceConnect, DeviceConnect'
-
-- name: Create devices
- azure_rm_iotdevice:
- hub: "hub{{ rpfx }}"
- hub_policy_name: "{{ registry_write_name }}"
- hub_policy_key: "{{ registry_write_key }}"
- name: "mydevice{{ item }}"
- twin_tags:
- location:
- country: US
- city: Redmond
- sensor: humidity
- with_items:
- - 1
- - 2
-
-- name: Query devices
- azure_rm_iotdevice_info:
- hub: "hub{{ rpfx }}"
- hub_policy_name: "{{ registry_write_name }}"
- hub_policy_key: "{{ registry_write_key }}"
- register: devices
-
-- assert:
- that:
- - devices.iot_devices | length == 2
-
-- name: Query devices
- azure_rm_iotdevice_info:
- hub: "hub{{ rpfx }}"
- name: "mydevice1"
- hub_policy_name: "{{ registry_write_name }}"
- hub_policy_key: "{{ registry_write_key }}"
- register: devices
-
-- assert:
- that:
- - devices.iot_devices | length == 1
- - devices.iot_devices[0].deviceId == 'mydevice1'
-
-- name: Query device twins
- azure_rm_iotdevice_info:
- hub: "hub{{ rpfx }}"
- query: "SELECT * FROM devices WHERE tags.location.country = 'US'"
- hub_policy_name: "{{ registry_write_name }}"
- hub_policy_key: "{{ registry_write_key }}"
- register: devices
-
-- assert:
- that:
- - devices.iot_devices | length == 2
-
-- name: Update devices
- azure_rm_iotdevice:
- hub: "hub{{ rpfx }}"
- hub_policy_name: "{{ registry_write_name }}"
- hub_policy_key: "{{ registry_write_key }}"
- name: "mydevice{{ item }}"
- edge_enabled: yes
- twin_tags:
- location:
- country: China
- city: Shanghai
- sensor: humidity
- with_items:
- - 1
- - 3
-
-- name: Query device twins
- azure_rm_iotdevice_info:
- hub: "hub{{ rpfx }}"
- query: "SELECT * FROM devices WHERE tags.location.country = 'US'"
- hub_policy_name: "{{ registry_write_name }}"
- hub_policy_key: "{{ registry_write_key }}"
- register: devices
-
-- assert:
- that:
- - devices.iot_devices | length == 1
- - devices.iot_devices[0].deviceId == 'mydevice2'
-
-- name: Delete IoT Hub (check mode)
- azure_rm_iothub:
- name: "hub{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- state: absent
- check_mode: yes
- register: iothub
-
-- assert:
- that:
- - iothub.changed
-
-- name: Delete IoT Hub
- azure_rm_iothub:
- name: "hub{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- state: absent
- register: iothub
-
-- assert:
- that:
- - iothub.changed
diff --git a/test/integration/targets/azure_rm_keyvault/aliases b/test/integration/targets/azure_rm_keyvault/aliases
deleted file mode 100644
index c256751e55..0000000000
--- a/test/integration/targets/azure_rm_keyvault/aliases
+++ /dev/null
@@ -1,5 +0,0 @@
-cloud/azure
-destructive
-shippable/azure/group9
-azure_rm_keyvaultkey
-azure_rm_keyvaultsecret
diff --git a/test/integration/targets/azure_rm_keyvault/lookup_plugins/azure_service_principal_attribute.py b/test/integration/targets/azure_rm_keyvault/lookup_plugins/azure_service_principal_attribute.py
deleted file mode 100644
index 1b7d0318f0..0000000000
--- a/test/integration/targets/azure_rm_keyvault/lookup_plugins/azure_service_principal_attribute.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# (c) 2018 Yunge Zhu, <yungez@microsoft.com>
-# (c) 2017 Ansible Project
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = """
-lookup: azure_service_principal_attribute
-
-requirements:
- - azure-graphrbac
-
-author:
- - Yunge Zhu <yungez@microsoft.com>
-
-version_added: "2.7"
-
-short_description: Look up Azure service principal attributes.
-
-description:
- - Describes object id of your Azure service principal account.
-options:
- azure_client_id:
- description: azure service principal client id.
- azure_secret:
- description: azure service principal secret
- azure_tenant:
- description: azure tenant
- azure_cloud_environment:
- description: azure cloud environment
-"""
-
-EXAMPLES = """
-set_fact:
- object_id: "{{ lookup('azure_service_principal_attribute',
- azure_client_id=azure_client_id,
- azure_secret=azure_secret,
-                   azure_tenant=azure_tenant) }}"
-"""
-
-RETURN = """
-_raw:
- description:
- Returns object id of service principal.
-"""
-
-from ansible.errors import AnsibleError
-from ansible.plugins import AnsiblePlugin
-from ansible.plugins.lookup import LookupBase
-from ansible.module_utils._text import to_native
-
-try:
- from azure.common.credentials import ServicePrincipalCredentials
- from azure.graphrbac import GraphRbacManagementClient
- from msrestazure import azure_cloud
- from msrestazure.azure_exceptions import CloudError
-except ImportError:
- raise AnsibleError(
- "The lookup azure_service_principal_attribute requires azure.graphrbac, msrest")
-
-
-class LookupModule(LookupBase):
- def run(self, terms, variables, **kwargs):
-
- self.set_options(direct=kwargs)
-
- credentials = {}
- credentials['azure_client_id'] = self.get_option('azure_client_id', None)
- credentials['azure_secret'] = self.get_option('azure_secret', None)
- credentials['azure_tenant'] = self.get_option('azure_tenant', 'common')
-
- if credentials['azure_client_id'] is None or credentials['azure_secret'] is None:
- raise AnsibleError("Must specify azure_client_id and azure_secret")
-
- _cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD
- if self.get_option('azure_cloud_environment', None) is not None:
-            _cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(self.get_option('azure_cloud_environment'))
-
- try:
- azure_credentials = ServicePrincipalCredentials(client_id=credentials['azure_client_id'],
- secret=credentials['azure_secret'],
- tenant=credentials['azure_tenant'],
- resource=_cloud_environment.endpoints.active_directory_graph_resource_id)
-
- client = GraphRbacManagementClient(azure_credentials, credentials['azure_tenant'],
- base_url=_cloud_environment.endpoints.active_directory_graph_resource_id)
-
- response = list(client.service_principals.list(filter="appId eq '{0}'".format(credentials['azure_client_id'])))
- sp = response[0]
-
- return sp.object_id.split(',')
- except CloudError as ex:
- raise AnsibleError("Failed to get service principal object id: %s" % to_native(ex))
- return False
diff --git a/test/integration/targets/azure_rm_keyvault/meta/main.yml b/test/integration/targets/azure_rm_keyvault/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_keyvault/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_keyvault/tasks/main.yml b/test/integration/targets/azure_rm_keyvault/tasks/main.yml
deleted file mode 100644
index 88b2cf08cc..0000000000
--- a/test/integration/targets/azure_rm_keyvault/tasks/main.yml
+++ /dev/null
@@ -1,270 +0,0 @@
-- name: Prepare random number
- set_fact:
- rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- tenant_id: "{{ azure_tenant }}"
- run_once: yes
-
-- name: lookup service principal object id
- set_fact:
- object_id: "{{ lookup('azure_service_principal_attribute',
- azure_client_id=azure_client_id,
- azure_secret=azure_secret,
- azure_tenant=tenant_id) }}"
- register: object_id
-
-- name: Create instance of Key Vault -- check mode
- azure_rm_keyvault:
- resource_group: "{{ resource_group }}"
- vault_name: "vault{{ rpfx }}"
- enabled_for_deployment: yes
- vault_tenant: "{{ tenant_id }}"
- sku:
- name: standard
- family: A
- access_policies:
- - tenant_id: "{{ tenant_id }}"
- object_id: "{{ object_id }}"
- keys:
- - get
- - list
- - update
- - create
- - import
- - delete
- - recover
- - backup
- - restore
- secrets:
- - get
- - list
- - set
- - delete
- - recover
- - backup
- - restore
- check_mode: yes
- register: output
-- name: Assert the resource instance was created
- assert:
- that:
- - output.changed
-
-- name: Create instance of Key Vault
- azure_rm_keyvault:
- resource_group: "{{ resource_group }}"
- vault_name: "vault{{ rpfx }}"
- enabled_for_deployment: yes
- vault_tenant: "{{ tenant_id }}"
- sku:
- name: standard
- family: A
- access_policies:
- - tenant_id: "{{ tenant_id }}"
- object_id: "{{ object_id }}"
- secrets:
- - get
- - list
- - set
- - delete
- - recover
- - backup
- - restore
- register: output
-- name: Assert the resource instance was created
- assert:
- that:
- - output.changed
-
-- name: Create instance of Key Vault again
- azure_rm_keyvault:
- resource_group: "{{ resource_group }}"
- vault_name: "vault{{ rpfx }}"
- enabled_for_deployment: yes
- vault_tenant: "{{ tenant_id }}"
- sku:
- name: standard
- family: A
- access_policies:
- - tenant_id: "{{ tenant_id }}"
- object_id: "{{ object_id }}"
- secrets:
- - get
- - list
- - set
- - delete
- - recover
- - backup
- - restore
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
-
-- name: Update existing Key Vault (add key permissions and tags)
- azure_rm_keyvault:
- resource_group: "{{ resource_group }}"
- vault_name: "vault{{ rpfx }}"
- enabled_for_deployment: yes
- vault_tenant: "{{ tenant_id }}"
- sku:
- name: standard
- family: A
- access_policies:
- - tenant_id: "{{ tenant_id }}"
- object_id: "{{ object_id }}"
- keys:
- - get
- - list
- - update
- - create
- - import
- - delete
- - recover
- - backup
- - restore
- secrets:
- - get
- - list
- - set
- - delete
- - recover
- - backup
- - restore
- tags:
- aaa: bbb
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed == true
-
-- name: Get key vault facts
- azure_rm_keyvault_info:
- resource_group: "{{ resource_group }}"
- name: "vault{{ rpfx }}"
- register: facts
-
-- name: Assert the facts are properly set
- assert:
- that:
- - facts['keyvaults'] | length == 1
- - facts['keyvaults'][0]['vault_uri'] != None
- - facts['keyvaults'][0]['name'] != None
- - facts['keyvaults'][0]['access_policies'] != None
- - facts['keyvaults'][0]['sku'] != None
- - facts['keyvaults'][0]['id'] != None
-#
-# azure_rm_keyvaultkey tests
-#
-
-- name: create a keyvault key
- block:
- - azure_rm_keyvaultkey:
- keyvault_uri: https://vault{{ rpfx }}.vault.azure.net
- key_name: testkey
- tags:
- testing: test
- delete: on-exit
- register: output
- - assert:
- that: output.changed
- rescue:
- - azure_rm_keyvaultkey:
- keyvault_uri: https://vault{{ rpfx }}.vault.azure.net
- state: absent
- key_name: testkey
-
-- name: Get key current version
- azure_rm_keyvaultkey_info:
- vault_uri: https://vault{{ rpfx }}.vault.azure.net
- name: testkey
- register: facts
-
-- name: Assert key facts
- assert:
- that:
- - facts['keys'] | length == 1
- - facts['keys'][0]['kid']
- - facts['keys'][0]['permitted_operations'] | length > 0
- - facts['keys'][0]['type']
- - facts['keys'][0]['version']
-
-- name: delete a keyvault key
- azure_rm_keyvaultkey:
- keyvault_uri: https://vault{{ rpfx }}.vault.azure.net
- state: absent
- key_name: testkey
- register: output
-
-- assert:
- that: output.changed
-
-#
-# azure_rm_keyvaultsecret tests
-#
-- name: create a keyvault secret
- block:
- - azure_rm_keyvaultsecret:
- keyvault_uri: https://vault{{ rpfx }}.vault.azure.net
- secret_name: testsecret
- secret_value: 'mysecret'
- tags:
- testing: test
- delete: on-exit
- register: output
- - assert:
- that: output.changed
- rescue:
- - azure_rm_keyvaultsecret:
- keyvault_uri: https://vault{{ rpfx }}.vault.azure.net
- state: absent
- secret_name: testsecret
-
-- name: delete a keyvault secret
- azure_rm_keyvaultsecret:
- keyvault_uri: https://vault{{ rpfx }}.vault.azure.net
- state: absent
- secret_name: testsecret
- register: output
-
-- assert:
- that: output.changed
-
-#
-# azure_rm_keyvault finalize & clean up
-#
-
-- name: Delete instance of Key Vault -- check mode
- azure_rm_keyvault:
- resource_group: "{{ resource_group }}"
- vault_name: "vault{{ rpfx }}"
- state: absent
- check_mode: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete instance of Key Vault
- azure_rm_keyvault:
- resource_group: "{{ resource_group }}"
- vault_name: "vault{{ rpfx }}"
- state: absent
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete non-existent instance of Key Vault
- azure_rm_keyvault:
- resource_group: "{{ resource_group }}"
- vault_name: "vault{{ rpfx }}"
- state: absent
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
diff --git a/test/integration/targets/azure_rm_loadbalancer/aliases b/test/integration/targets/azure_rm_loadbalancer/aliases
deleted file mode 100644
index fadef3afa8..0000000000
--- a/test/integration/targets/azure_rm_loadbalancer/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/azure
-shippable/azure/group5
-destructive
diff --git a/test/integration/targets/azure_rm_loadbalancer/meta/main.yml b/test/integration/targets/azure_rm_loadbalancer/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_loadbalancer/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_loadbalancer/tasks/main.yml b/test/integration/targets/azure_rm_loadbalancer/tasks/main.yml
deleted file mode 100644
index 3dad1a2e17..0000000000
--- a/test/integration/targets/azure_rm_loadbalancer/tasks/main.yml
+++ /dev/null
@@ -1,298 +0,0 @@
-- name: Prepare random number
- set_fact:
- pipaname: "pipa{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- pipbname: "pipb{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- lbvnname: "lbvn{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- lbname_a: "lba{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- lbname_b: "lbb{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- lbname_c1: "lbc1-{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- lbname_c2: "lbc2-{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- lbname_d: "lbd{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-- name: create public ip (Standard)
- azure_rm_publicipaddress:
- name: "{{ pipbname }}"
- sku: Standard
- allocation_method: Static
- resource_group: '{{ resource_group }}'
-
-- name: create public ip (Basic)
- azure_rm_publicipaddress:
- name: "{{ pipaname }}"
- resource_group: '{{ resource_group }}'
-
-- name: clear load balancer
- azure_rm_loadbalancer:
- resource_group: '{{ resource_group }}'
- name: "{{ lbname_a }}"
- state: absent
-
-- name: create load balancer
- azure_rm_loadbalancer:
- resource_group: '{{ resource_group }}'
- name: "{{ lbname_a }}"
- public_ip: "{{ pipaname }}"
- register: output
-
-- name: assert load balancer created
- assert:
- that: output.changed
-
-- name: delete load balancer
- azure_rm_loadbalancer:
- resource_group: '{{ resource_group }}'
- name: "{{ lbname_a }}"
- state: absent
- register: output
-
-- name: assert load balancer deleted
- assert:
- that: output.changed
-
-- name: delete load balancer (idempotent)
- azure_rm_loadbalancer:
- resource_group: '{{ resource_group }}'
- name: "{{ lbname_a }}"
- state: absent
- register: output
-
-- name: assert load balancer deleted (idempotent)
- assert:
- that: not output.changed
-
-- name: create another load balancer with more options
- azure_rm_loadbalancer:
- resource_group: '{{ resource_group }}'
- name: "{{ lbname_b }}"
- sku: Standard
- public_ip_address: "{{ pipbname }}"
- probe_protocol: Tcp
- probe_port: 80
- probe_interval: 10
- probe_fail_count: 3
- protocol: Tcp
- load_distribution: Default
- frontend_port: 80
- backend_port: 8080
- idle_timeout: 4
- natpool_frontend_port_start: 30
- natpool_frontend_port_end: 40
- natpool_backend_port: 80
- natpool_protocol: Tcp
- register: output
-
-- name: assert complex load balancer created
- assert:
- that:
- - output.changed
- - output.state.sku.name == 'Standard'
-
-- name: create load balancer again to check idempotency
- azure_rm_loadbalancer:
- resource_group: '{{ resource_group }}'
- name: "{{ lbname_b }}"
- sku: Standard
- public_ip_address: "{{ pipbname }}"
- probe_protocol: Tcp
- probe_port: 80
- probe_interval: 10
- probe_fail_count: 3
- protocol: Tcp
- load_distribution: Default
- frontend_port: 80
- backend_port: 8080
- idle_timeout: 4
- natpool_frontend_port_start: 30
- natpool_frontend_port_end: 40
- natpool_backend_port: 80
- natpool_protocol: Tcp
- register: output
-
-- name: assert that output has not changed
- assert:
- that:
- - not output.changed
-
-- name: create load balancer again to check idempotency - change something
- azure_rm_loadbalancer:
- resource_group: '{{ resource_group }}'
- name: "{{ lbname_b }}"
- sku: Standard
- public_ip_address: "{{ pipbname }}"
- probe_protocol: Tcp
- probe_port: 80
- probe_interval: 10
- probe_fail_count: 3
- protocol: Tcp
- load_distribution: Default
- frontend_port: 81
- backend_port: 8080
- idle_timeout: 4
- natpool_frontend_port_start: 30
- natpool_frontend_port_end: 40
- natpool_backend_port: 80
- natpool_protocol: Tcp
- register: output
-
-- name: assert that output has changed
- assert:
- that:
- - output.changed
-
-- name: delete load balancer
- azure_rm_loadbalancer:
- resource_group: '{{ resource_group }}'
- name: "{{ lbname_b }}"
- state: absent
-
-- name: create load balancer with multiple parameters
- azure_rm_loadbalancer:
- resource_group: '{{ resource_group }}'
- name: "{{ lbname_c1 }}"
- frontend_ip_configurations:
- - name: frontendipconf0
- public_ip_address: "{{ pipaname }}"
- backend_address_pools:
- - name: backendaddrpool0
- probes:
- - name: prob0
- port: 80
- inbound_nat_pools:
- - name: inboundnatpool0
- frontend_ip_configuration_name: frontendipconf0
- protocol: Tcp
- frontend_port_range_start: 80
- frontend_port_range_end: 81
- backend_port: 8080
- load_balancing_rules:
- - name: lbrbalancingrule0
- frontend_ip_configuration: frontendipconf0
- backend_address_pool: backendaddrpool0
- frontend_port: 80
- backend_port: 80
- probe: prob0
- register: output
-
-- name: assert complex load balancer created
- assert:
- that: output.changed
-
-- name: delete load balancer
- azure_rm_loadbalancer:
- resource_group: '{{ resource_group }}'
- name: "{{ lbname_c1 }}"
- state: absent
-
-- name: create load balancer with multiple parameters
- azure_rm_loadbalancer:
- resource_group: '{{ resource_group }}'
- name: "{{ lbname_c2 }}"
- frontend_ip_configurations:
- - name: frontendipconf0
- public_ip_address: "{{ pipaname }}"
- backend_address_pools:
- - name: backendaddrpool0
- probes:
- - name: prob0
- port: 80
- load_balancing_rules:
- - name: lbrbalancingrule0
- frontend_ip_configuration: frontendipconf0
- backend_address_pool: backendaddrpool0
- frontend_port: 80
- backend_port: 80
- probe: prob0
- inbound_nat_rules:
- - name: inboundnatrule0
- backend_port: 8080
- protocol: Tcp
- frontend_port: 8080
- frontend_ip_configuration: frontendipconf0
- register: output
-
-- name: assert complex load balancer created
- assert:
- that: output.changed
-
-- name: delete load balancer
- azure_rm_loadbalancer:
- resource_group: '{{ resource_group }}'
- name: "{{ lbname_c2 }}"
- state: absent
-
-- name: Create virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: "{{ lbvnname }}"
- address_prefixes: "10.10.0.0/16"
-
-- name: Add subnet
- azure_rm_subnet:
- resource_group: "{{ resource_group }}"
- name: "lb{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}sb"
- address_prefix: "10.10.0.0/24"
- virtual_network: "{{ lbvnname }}"
- register: subnet
-
-- name: create internal loadbalancer
- azure_rm_loadbalancer:
- resource_group: '{{ resource_group }}'
- name: "{{ lbname_d }}"
- frontend_ip_configurations:
- - name: frontendipconf0
- private_ip_address: 10.10.0.10
- private_ip_allocation_method: Static
- subnet: "{{ subnet.state.id }}"
- backend_address_pools:
- - name: backendaddrpool0
- probes:
- - name: prob0
- port: 80
- inbound_nat_pools:
- - name: inboundnatpool0
- frontend_ip_configuration_name: frontendipconf0
- protocol: Tcp
- frontend_port_range_start: 80
- frontend_port_range_end: 81
- backend_port: 8080
- load_balancing_rules:
- - name: lbrbalancingrule0
- frontend_ip_configuration: frontendipconf0
- backend_address_pool: backendaddrpool0
- frontend_port: 80
- backend_port: 80
- probe: prob0
- register: output
-
-- name: assert complex load balancer created
- assert:
- that: output.changed
-
-- name: delete load balancer
- azure_rm_loadbalancer:
- resource_group: '{{ resource_group }}'
- name: "{{ lbname_d }}"
- state: absent
-
-- name: cleanup public ip
- azure_rm_publicipaddress:
- name: "{{ item }}"
- resource_group: '{{ resource_group }}'
- state: absent
- with_items:
- - "{{ pipaname }}"
- - "{{ pipbname }}"
-
-- name: cleanup subnet
- azure_rm_subnet:
- resource_group: "{{ resource_group }}"
- name: "lb{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}sb"
- virtual_network: "{{ lbvnname }}"
- state: absent
-
-- name: cleanup virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: "{{ lbvnname }}"
- state: absent
diff --git a/test/integration/targets/azure_rm_lock/aliases b/test/integration/targets/azure_rm_lock/aliases
deleted file mode 100644
index 68bf9ddab0..0000000000
--- a/test/integration/targets/azure_rm_lock/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-destructive
-unsupported
-azure_rm_lock_info
diff --git a/test/integration/targets/azure_rm_lock/meta/main.yml b/test/integration/targets/azure_rm_lock/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_lock/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_lock/tasks/main.yml b/test/integration/targets/azure_rm_lock/tasks/main.yml
deleted file mode 100644
index 371e24c2f8..0000000000
--- a/test/integration/targets/azure_rm_lock/tasks/main.yml
+++ /dev/null
@@ -1,99 +0,0 @@
-- name: Create a virtual network
- azure_rm_virtualnetwork:
- name: mytestvirtualnetworklock
- resource_group: "{{ resource_group }}"
- address_prefixes_cidr:
- - "10.1.0.0/16"
- register: vn
-
-- name: Add lock to resource (check_mode)
- azure_rm_lock:
- name: keep
- managed_resource_id: "{{ vn.state.id }}"
- level: read_only
- register: lock
- check_mode: yes
-
-- assert:
- that:
- - lock.changed
-
-- name: Query lock
- azure_rm_lock_info:
- managed_resource_id: "{{ vn.state.id }}"
- register: locks
-
-- assert:
- that:
- - locks.locks | length == 0
-
-- name: Add lock to resource
- azure_rm_lock:
- name: keep
- managed_resource_id: "{{ vn.state.id }}"
- level: read_only
- register: lock
-
-- assert:
- that:
- - lock.changed
- - lock.id
-
-- name: Query lock
- azure_rm_lock_info:
- name: keep
- managed_resource_id: "{{ vn.state.id }}"
- register: locks
-
-- assert:
- that:
- - locks.locks | length == 1
-
-- name: Update lock to resource (idempotent)
- azure_rm_lock:
- name: keep
- managed_resource_id: "{{ vn.state.id }}"
- level: read_only
- register: lock1
-
-- assert:
- that:
- - not lock1.changed
- - lock1.id == lock.id
-
-- name: Update lock level
- azure_rm_lock:
- name: keep
- managed_resource_id: "{{ vn.state.id }}"
- level: can_not_delete
- register: lock
-
-- assert:
- that:
- - lock.changed
- - lock.level == 'can_not_delete'
-
-- name: Delete lock
- azure_rm_lock:
- name: keep
- managed_resource_id: "{{ vn.state.id }}"
- register: lock
-
-- assert:
- that:
- - lock.changed
-
-- name: Query lock
- azure_rm_lock_info:
- managed_resource_id: "{{ vn.state.id }}"
- register: locks
-
-- assert:
- that:
- - locks.locks | length == 0
-
-- name: Clean up
- azure_rm_virtualnetwork:
- name: mytestvirtualnetworklock
- resource_group: "{{ resource_group }}"
- state: absent \ No newline at end of file
diff --git a/test/integration/targets/azure_rm_loganalyticsworkspace/aliases b/test/integration/targets/azure_rm_loganalyticsworkspace/aliases
deleted file mode 100644
index 74e589c56e..0000000000
--- a/test/integration/targets/azure_rm_loganalyticsworkspace/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group4
-destructive
-azure_rm_loganalyticsworkspace_info
diff --git a/test/integration/targets/azure_rm_loganalyticsworkspace/meta/main.yml b/test/integration/targets/azure_rm_loganalyticsworkspace/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_loganalyticsworkspace/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_loganalyticsworkspace/tasks/main.yml b/test/integration/targets/azure_rm_loganalyticsworkspace/tasks/main.yml
deleted file mode 100644
index 7f9a05f79d..0000000000
--- a/test/integration/targets/azure_rm_loganalyticsworkspace/tasks/main.yml
+++ /dev/null
@@ -1,128 +0,0 @@
-- name: Prepare random number
- set_fact:
- name: "workspace{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
-
-- name: Create workspace (check mode)
- azure_rm_loganalyticsworkspace:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- retention_in_days: 40
- check_mode: yes
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Get workspace
- azure_rm_loganalyticsworkspace_info:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- register: facts
-
-- assert:
- that:
- - facts.workspaces | length == 0
-
-- name: Create workspace
- azure_rm_loganalyticsworkspace:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- retention_in_days: 40
- register: output
-
-- assert:
- that:
- - output.retention_in_days == 40
- - output.changed
- - output.intelligence_packs
-
-- name: Create workspace (idempotent)
- azure_rm_loganalyticsworkspace:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- retention_in_days: 40
- register: output
-
-- assert:
- that:
- - not output.changed
-
-- name: Get workspace
- azure_rm_loganalyticsworkspace_info:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- register: facts
-
-- assert:
- that:
- - facts.workspaces | length == 1
- - facts.workspaces[0].id == output.id
-
-- set_fact:
- pack: "{{ pack | default({}) | combine({output.intelligence_packs[0].name: not output.intelligence_packs[0].enabled}) }}"
-
-- name: Update intelligence pack
- azure_rm_loganalyticsworkspace:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- intelligence_packs: "{{ pack }}"
- register: intelligence
-
-- assert:
- that:
- - intelligence.intelligence_packs[0].enabled != output.intelligence_packs[0].enabled
-
-- name: Remove workspace (check mode)
- azure_rm_loganalyticsworkspace:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- state: absent
- check_mode: yes
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Get workspace
- azure_rm_loganalyticsworkspace_info:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- register: facts
-
-- assert:
- that:
- - facts.workspaces | length == 1
-
-- name: Remove workspace
- azure_rm_loganalyticsworkspace:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- state: absent
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Get workspace
- azure_rm_loganalyticsworkspace_info:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- register: facts
-
-- assert:
- that:
- - facts.workspaces | length == 0
-
-- name: Remove workspace (idempotent)
- azure_rm_loganalyticsworkspace:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- state: absent
- register: output
-
-- assert:
- that:
- - not output.changed
diff --git a/test/integration/targets/azure_rm_manageddisk/aliases b/test/integration/targets/azure_rm_manageddisk/aliases
deleted file mode 100644
index bf20c612b5..0000000000
--- a/test/integration/targets/azure_rm_manageddisk/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group4
-destructive
-azure_rm_manageddisk_info
diff --git a/test/integration/targets/azure_rm_manageddisk/meta/main.yml b/test/integration/targets/azure_rm_manageddisk/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_manageddisk/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_manageddisk/tasks/main.yml b/test/integration/targets/azure_rm_manageddisk/tasks/main.yml
deleted file mode 100644
index 3abaaefb63..0000000000
--- a/test/integration/targets/azure_rm_manageddisk/tasks/main.yml
+++ /dev/null
@@ -1,204 +0,0 @@
- - name: Prepare random number
- set_fact:
- rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- parameter: {}
- run_once: yes
-
- - name: Clear previously created disks (if any)
- azure_rm_manageddisk:
- resource_group: "{{ resource_group }}"
- name: "md{{ rpfx }}{{ item }}"
- managed_by: ''
- state: absent
- with_items:
- - 1
- - 2
- - 3
-
- - name: Test invalid disk name (should give error)
- azure_rm_manageddisk:
- resource_group: "{{ resource_group }}"
- name: "invalid_char$"
- disk_size_gb: 1
- register: output
- ignore_errors: yes
- check_mode: no
-
- - name: Assert task failed
- assert: { that: "output['failed'] == True" }
-
- - name: Create managed disk (Check Mode)
- azure_rm_manageddisk:
- resource_group: "{{ resource_group }}"
- name: "md{{ rpfx }}1"
- storage_account_type: "Standard_LRS"
- disk_size_gb: 1
- tags:
- testing: testing
- delete: never
- register: output
- check_mode: yes
-
- - name: Assert status succeeded (Check Mode)
- assert:
- that:
- - output.changed
- - output.state
-
- - name: Create new managed disk successfully
- azure_rm_manageddisk:
- resource_group: "{{ resource_group }}"
- name: "md{{ rpfx }}1"
- storage_account_type: "Standard_LRS"
- disk_size_gb: 1
- tags:
- testing: testing
- delete: never
- register: output
-
- - name: Assert status succeeded and results include an Id value
- assert:
- that:
- - output.changed
- - output.state.disk_size_gb == 1
- - output.state.id is defined
- - output.state.os_type == None
- - output.state.storage_account_type == "Standard_LRS"
-
- - name: Copy disk to a new managed disk
- azure_rm_manageddisk:
- resource_group: "{{ resource_group }}"
- name: "md{{ rpfx }}2"
- create_option: "copy"
- source_uri: "{{ output.state.id }}"
- disk_size_gb: 1
- register: disk2
-
- - name: Assert status succeeded and results include an Id value
- assert:
- that:
- - disk2.changed
- - disk2.state.id is defined
-
- - name: Create a new managed disk with zone and os type
- azure_rm_manageddisk:
- resource_group: "{{ resource_group }}"
- name: "md{{ rpfx }}3"
- source_uri: "{{ output.state.id }}"
- disk_size_gb: 1
- zone: "1"
- os_type: windows
- register: disk3
-
- - name: Assert status succeeded and results include an Id value
- assert:
- that:
- - disk3.changed
- - disk3.state.id is defined
- - disk3.state.zone == "1"
- - disk3.state.os_type == "windows"
-
- - name: Change storage account type to an invalid type
- azure_rm_manageddisk:
- resource_group: "{{ resource_group }}"
- name: "md{{ rpfx }}1"
- storage_account_type: "PremiumL"
- disk_size_gb: 1
- register: output
- ignore_errors: yes
-
- - name: Assert storage account type change failed
- assert: { that: "output['failed'] == True" }
-
- - name: Update disk options (os_type, account_type, size, tags)
- azure_rm_manageddisk:
- resource_group: "{{ resource_group }}"
- name: "md{{ rpfx }}1"
- disk_size_gb: 2
- os_type: linux
- storage_account_type: "StandardSSD_LRS"
- tags:
- galaxy: "no"
- delete: never
- register: output
-
- - assert:
- that:
- - output.changed
- - output.state.storage_account_type == "StandardSSD_LRS"
- - output.state.disk_size_gb == 2
- - "output.state.tags | length == 2"
- - "output.state.tags.galaxy == 'no'"
- - output.state.os_type == 'linux'
-
- - name: Gather facts for one specific disk
- azure_rm_manageddisk_info:
- resource_group: "{{ resource_group }}"
- name: "md{{ rpfx }}1"
- register: output
-
- - assert:
- that:
- - "output.ansible_info.azure_managed_disk | length == 1"
- - output.ansible_info.azure_managed_disk[0].storage_account_type == "StandardSSD_LRS"
- - output.ansible_info.azure_managed_disk[0].disk_size_gb == 2
- - "output.ansible_info.azure_managed_disk[0].os_type == 'linux'"
-
- - set_fact:
- parameter: "{{ parameter | combine({item.key: item.value}) }}"
- when: item.key not in ['id', 'changed'] and item.value != None
- with_dict: "{{ output.ansible_info.azure_managed_disk[0] }}"
-
- - name: Create disk with facts return value
- azure_rm_manageddisk:
- resource_group: "{{ resource_group }}"
- location: "{{ parameter.location }}"
- name: "{{ parameter.name }}"
- storage_account_type: "{{ parameter.storage_account_type }}"
- disk_size_gb: "{{ parameter.disk_size_gb }}"
- create_option: "{{ parameter.create_option }}"
- tags: "{{ parameter.tags }}"
- register: output
-
- - assert:
- that:
- - not output.changed
-
- - name: Delete managed disk (Check Mode)
- azure_rm_manageddisk:
- resource_group: "{{ resource_group }}"
- name: "md{{ rpfx }}1"
- state: absent
- register: output
- check_mode: yes
-
- - name: Assert status succeeded
- assert:
- that:
- - output.changed
- - output.state
-
- - name: Delete all managed disks
- azure_rm_manageddisk:
- resource_group: "{{ resource_group }}"
- name: "md{{ rpfx }}{{ item }}"
- managed_by: ''
- state: absent
- with_items:
- - 1
- - 2
- - 3
-
- - name: Delete virtual machine
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "tr{{ rpfx }}"
- state: absent
- vm_size: Standard_DS1_v2
-
- - name: Delete public ip
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- allocation_method: Static
- name: "tr{{ rpfx }}"
- state: absent
diff --git a/test/integration/targets/azure_rm_mariadbserver/aliases b/test/integration/targets/azure_rm_mariadbserver/aliases
deleted file mode 100644
index b586dc7c3e..0000000000
--- a/test/integration/targets/azure_rm_mariadbserver/aliases
+++ /dev/null
@@ -1,8 +0,0 @@
-cloud/azure
-destructive
-shippable/azure/group9
-azure_rm_mariadbserver_facts
-azure_rm_mariadbdatabase
-azure_rm_mariadbdatabase_facts
-azure_rm_mariadbfirewallrule
-azure_rm_mariadbfirewallrule_facts
diff --git a/test/integration/targets/azure_rm_mariadbserver/meta/main.yml b/test/integration/targets/azure_rm_mariadbserver/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_mariadbserver/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_mariadbserver/tasks/main.yml b/test/integration/targets/azure_rm_mariadbserver/tasks/main.yml
deleted file mode 100644
index 5b33ffb951..0000000000
--- a/test/integration/targets/azure_rm_mariadbserver/tasks/main.yml
+++ /dev/null
@@ -1,640 +0,0 @@
-- name: Prepare random number
- set_fact:
- rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-- name: Create instance of MariaDB Server -- check mode
- azure_rm_mariadbserver:
- resource_group: "{{ resource_group }}"
- name: mariadbsrv{{ rpfx }}
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 51200
- version: 10.2
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- check_mode: yes
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
-
-- name: Create instance of MariaDB Server
- azure_rm_mariadbserver:
- resource_group: "{{ resource_group }}"
- name: mariadbsrv{{ rpfx }}
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 51200
- version: 10.2
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
- - output.state == 'Ready'
-
-- name: Create again instance of MariaDB Server
- azure_rm_mariadbserver:
- resource_group: "{{ resource_group }}"
- name: mariadbsrv{{ rpfx }}
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 51200
- version: 10.2
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
- - output.state == 'Ready'
-
-- name: Update instance of MariaDB Server, change storage size
- azure_rm_mariadbserver:
- resource_group: "{{ resource_group }}"
- name: mariadbsrv{{ rpfx }}
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 128000
- version: 10.2
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
- - output.state == 'Ready'
-- debug:
- var: output
-
-- name: Gather facts MariaDB Server
- azure_rm_mariadbserver_facts:
- resource_group: "{{ resource_group }}"
- name: mariadbsrv{{ rpfx }}
- register: output
-- name: Assert that storage size is correct
- assert:
- that:
- - output.servers[0]['storage_mb'] == 128000
-
-- name: Create second instance of MariaDB Server
- azure_rm_mariadbserver:
- resource_group: "{{ resource_group }}"
- name: mariadbsrv{{ rpfx }}second
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 51200
- version: 10.2
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- tags:
- aaa: bbb
-
-- name: Update tags on second instance of MariaDB Server
- azure_rm_mariadbserver:
- resource_group: "{{ resource_group }}"
- name: mariadbsrv{{ rpfx }}second
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 51200
- version: 10.2
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- tags:
- ccc: ddd
-
-- name: Gather facts MariaDB Server
- azure_rm_mariadbserver_facts:
- resource_group: "{{ resource_group }}"
- name: mariadbsrv{{ rpfx }}second
- register: output
-
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.servers[0]['id'] != None
- - output.servers[0]['name'] != None
- - output.servers[0]['location'] != None
- - output.servers[0]['sku']['name'] != None
- - output.servers[0]['sku']['tier'] != None
- - output.servers[0]['sku']['capacity'] != None
- - output.servers[0]['version'] != None
- - output.servers[0]['user_visible_state'] != None
- - output.servers[0]['fully_qualified_domain_name'] != None
- - output.servers[0]['tags']['aaa'] == 'bbb'
- - output.servers[0]['tags']['ccc'] == 'ddd'
-
-- name: Gather facts MariaDB Server
- azure_rm_mariadbserver_facts:
- resource_group: "{{ resource_group }}"
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.servers[0]['id'] != None
- - output.servers[0]['name'] != None
- - output.servers[0]['location'] != None
- - output.servers[0]['sku']['name'] != None
- - output.servers[0]['sku']['tier'] != None
- - output.servers[0]['sku']['capacity'] != None
- - output.servers[0]['version'] != None
- - output.servers[0]['user_visible_state'] != None
- - output.servers[0]['fully_qualified_domain_name'] != None
- - output.servers[1]['id'] != None
- - output.servers[1]['name'] != None
- - output.servers[1]['location'] != None
- - output.servers[1]['sku']['name'] != None
- - output.servers[1]['sku']['tier'] != None
- - output.servers[1]['sku']['capacity'] != None
- - output.servers[1]['version'] != None
- - output.servers[1]['user_visible_state'] != None
- - output.servers[1]['fully_qualified_domain_name'] != None
-
-#
-# azure_rm_mariadbdatabase tests below
-#
-- name: Create instance of MariaDB Database -- check mode
- azure_rm_mariadbdatabase:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: testdatabase
- check_mode: yes
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
-
-- name: Create instance of MariaDB Database
- azure_rm_mariadbdatabase:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: testdatabase
- collation: latin1_swedish_ci
- charset: latin1
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
- - output.name == 'testdatabase'
-
-- name: Create again instance of MariaDB Database
- azure_rm_mariadbdatabase:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: testdatabase
- collation: latin1_swedish_ci
- charset: latin1
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
- - output.name == 'testdatabase'
-
-- name: Try to update database without force_update
- azure_rm_mariadbdatabase:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: testdatabase
- collation: latin1_czech_ci
- charset: latin1
- ignore_errors: yes
- register: output
-- name: Assert that nothing has changed
- assert:
- that:
- - output.changed == False
-
-- name: Update instance of database using force_update
- azure_rm_mariadbdatabase:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: testdatabase
- collation: latin1_czech_ci
- charset: latin1
- force_update: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
- - output.name == 'testdatabase'
-
-- name: Create second instance of MariaDB Database
- azure_rm_mariadbdatabase:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: testdatabase2
-
-- name: Gather facts MariaDB Database
- azure_rm_mariadbdatabase_facts:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: testdatabase
- register: output
-
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.databases[0]['server_name'] != None
- - output.databases[0]['name'] != None
- - output.databases[0]['charset'] != None
- - output.databases[0]['collation'] != None
-
-- name: Gather facts MariaDB Database
- azure_rm_mariadbdatabase_facts:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.databases[0]['server_name'] != None
- - output.databases[0]['name'] != None
- - output.databases[0]['charset'] != None
- - output.databases[0]['collation'] != None
- - output.databases[1]['server_name'] != None
- - output.databases[1]['name'] != None
- - output.databases[1]['charset'] != None
- - output.databases[1]['collation'] != None
-
-- name: Delete instance of MariaDB Database -- check mode
- azure_rm_mariadbdatabase:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: testdatabase
- state: absent
- check_mode: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete instance of MariaDB Database
- azure_rm_mariadbdatabase:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: testdatabase
- state: absent
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete nonexistent instance of MariaDB Database
- azure_rm_mariadbdatabase:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: testdatabase
- state: absent
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
-
-#
-# azure_rm_firewallrule tests below
-#
-- name: Create instance of Firewall Rule -- check mode
- azure_rm_mariadbfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- start_ip_address: 172.28.10.136
- end_ip_address: 172.28.10.138
- check_mode: yes
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
-
-- name: Create instance of Firewall Rule
- azure_rm_mariadbfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- start_ip_address: 172.28.10.136
- end_ip_address: 172.28.10.138
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
-
-- name: Create again instance of Firewall Rule
- azure_rm_mariadbfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- start_ip_address: 172.28.10.136
- end_ip_address: 172.28.10.138
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
-
-- name: Delete instance of Firewall Rule -- check mode
- azure_rm_mariadbfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- state: absent
- check_mode: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Create instance of Firewall Rule -- second
- azure_rm_mariadbfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}second
- start_ip_address: 172.28.10.136
- end_ip_address: 172.28.10.138
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Gather facts MariaDB Firewall Rule
- azure_rm_mariadbfirewallrule_facts:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.rules[0].id != None
- - output.rules[0].server_name != None
- - output.rules[0].name != None
- - output.rules[0].start_ip_address != None
- - output.rules[0].end_ip_address != None
- - "output.rules | length == 1"
-
-- name: Gather facts MariaDB Firewall Rule
- azure_rm_mariadbfirewallrule_facts:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.rules[0].id != None
- - output.rules[0].server_name != None
- - output.rules[0].name != None
- - output.rules[0].start_ip_address != None
- - output.rules[0].end_ip_address != None
- - output.rules[1].id != None
- - output.rules[1].name != None
- - output.rules[1].start_ip_address != None
- - output.rules[1].end_ip_address != None
- - "output.rules | length == 2"
-
-- name: Delete instance of Firewall Rule
- azure_rm_mariadbfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- state: absent
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete nonexistent instance of Firewall Rule
- azure_rm_mariadbfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- state: absent
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
-
-- name: Delete instance of Firewall Rule - second
- azure_rm_mariadbfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}second
- state: absent
-
-- name: Gather facts MariaDB Firewall Rule
- azure_rm_mariadbfirewallrule_facts:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- register: output
-- name: Assert that empty list was returned
- assert:
- that:
- - output.changed == False
- - "output.rules | length == 0"
-
-#
-# configuration
-#
-- name: Create instance of Configuration -- check mode
- azure_rm_mariadbconfiguration:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: event_scheduler
- value: "ON"
- check_mode: yes
- register: output
-- name: Assert that change was registered
- assert:
- that:
- - output.changed
-
-- name: Gather facts of default configuration
- azure_rm_mariadbconfiguration_facts:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: event_scheduler
- register: output
-- name: Get facts of event_scheduler
- debug:
- var: output
-
-- name: Try to delete default configuration
- azure_rm_mariadbconfiguration:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: event_scheduler
- state: absent
- register: output
-- name: Assert that no change was registered
- assert:
- that:
- - not output.changed
-
-- name: Try to change default configuration
- azure_rm_mariadbconfiguration:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: event_scheduler
- value: "ON"
- register: output
-- name: Assert that change was registered
- assert:
- that:
- - output.changed
-
-- name: Try to change default configuration -- idempotent
- azure_rm_mariadbconfiguration:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: event_scheduler
- value: "ON"
- register: output
-- name: Assert that no change was registered
- assert:
- that:
- - not output.changed
-
-- name: Try to reset configuration
- azure_rm_mariadbconfiguration:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: event_scheduler
- state: absent
- register: output
-- name: Assert that change was registered
- assert:
- that:
- - output.changed
-
-- name: Try to reset configuration -- idempotent
- azure_rm_mariadbconfiguration:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: event_scheduler
- state: absent
- register: output
-- name: Assert that no change was registered
- assert:
- that:
- - not output.changed
-
-- name: Gather facts MariaDB Configuration
- azure_rm_mariadbconfiguration_facts:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- name: event_scheduler
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.settings[0].id != None
- - output.settings[0].name != None
- - output.settings[0].value != None
- - output.settings[0].description != None
- - output.settings[0].source != None
- - output.settings | length == 1
-
-- name: Gather facts MariaDB Configuration
- azure_rm_mariadbconfiguration_facts:
- resource_group: "{{ resource_group }}"
- server_name: mariadbsrv{{ rpfx }}
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.settings[0].id != None
- - output.settings[0].name != None
- - output.settings[0].value != None
- - output.settings[0].description != None
- - output.settings[0].source != None
- - output.settings | length > 1
-
-#
-# clean up azure_rm_mariadbserver test
-#
-
-- name: Delete instance of MariaDB Server -- check mode
- azure_rm_mariadbserver:
- resource_group: "{{ resource_group }}"
- name: mariadbsrv{{ rpfx }}
- state: absent
- check_mode: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete instance of MariaDB Server
- azure_rm_mariadbserver:
- resource_group: "{{ resource_group }}"
- name: mariadbsrv{{ rpfx }}
- state: absent
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete nonexistent instance of MariaDB Server
- azure_rm_mariadbserver:
- resource_group: "{{ resource_group }}"
- name: mariadbsrv{{ rpfx }}
- state: absent
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
-
-- name: Delete second instance of MariaDB Server
- azure_rm_mariadbserver:
- resource_group: "{{ resource_group }}"
- name: mariadbsrv{{ rpfx }}second
- state: absent
- async: 400
- poll: 0
diff --git a/test/integration/targets/azure_rm_monitorlogprofile/aliases b/test/integration/targets/azure_rm_monitorlogprofile/aliases
deleted file mode 100644
index 35b9401151..0000000000
--- a/test/integration/targets/azure_rm_monitorlogprofile/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/azure
-destructive
-unsupported \ No newline at end of file
diff --git a/test/integration/targets/azure_rm_monitorlogprofile/meta/main.yml b/test/integration/targets/azure_rm_monitorlogprofile/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_monitorlogprofile/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_monitorlogprofile/tasks/main.yml b/test/integration/targets/azure_rm_monitorlogprofile/tasks/main.yml
deleted file mode 100644
index 9019f59b28..0000000000
--- a/test/integration/targets/azure_rm_monitorlogprofile/tasks/main.yml
+++ /dev/null
@@ -1,104 +0,0 @@
-- name: Prepare random number
- set_fact:
- profile_name: "profile{{ resource_group | hash('md5') | truncate(5, True, '') }}{{ 1000 | random }}"
- location: eastus
- run_once: yes
-
-- name: create Azure storage account
- azure_rm_storageaccount:
- name: '{{ profile_name }}'
- resource_group: "{{ resource_group }}"
- account_type: Standard_LRS
-
-- name: create log profile (check mode)
- azure_rm_monitorlogprofile:
- name: "{{ profile_name }}"
- location: "{{ location }}"
- locations:
- - eastus
- - westus
- categories:
- - Write
- - Action
- retention_policy:
- enabled: False
- days: 1
- storage_account:
- resource_group: "{{ resource_group }}"
- name: "{{ profile_name }}"
- check_mode: yes
- register: output
-
-- name: assert create (check mode)
- assert:
- that:
- - output.changed
-
-- name: create log profile
- azure_rm_monitorlogprofile:
- name: "{{ profile_name }}"
- location: "{{ location }}"
- locations:
- - eastus
- - westus
- categories:
- - Write
- - Action
- retention_policy:
- enabled: False
- days: 1
- storage_account:
- resource_group: "{{ resource_group }}"
- name: "{{ profile_name }}"
- register: output
-
-- name: assert create
- assert:
- that:
- - output.changed
- - output.id
-
-- name: update log profile (idempotence)
- azure_rm_monitorlogprofile:
- name: "{{ profile_name }}"
- location: "{{ location }}"
- locations:
- - eastus
- - westus
- categories:
- - Write
- - Action
- retention_policy:
- enabled: False
- days: 1
- storage_account:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_name }}"
- register: output
-
-- name: assert update idempotence
- assert:
- that:
- - output.changed == False
-
-- name: update log profile
- azure_rm_monitorlogprofile:
- name: "{{ profile_name }}"
- location: "{{ location }}"
- locations:
- - eastus
- categories:
- - Write
- - Action
- retention_policy:
- enabled: False
- days: 2
- storage_account:
- resource_group: "{{ resource_group }}"
- name: "{{ profile_name }}"
- register: output
-
-- name: assert update
- assert:
- that:
- - output.changed
diff --git a/test/integration/targets/azure_rm_mysqlserver/aliases b/test/integration/targets/azure_rm_mysqlserver/aliases
deleted file mode 100644
index 9e3fd9ae68..0000000000
--- a/test/integration/targets/azure_rm_mysqlserver/aliases
+++ /dev/null
@@ -1,10 +0,0 @@
-cloud/azure
-destructive
-shippable/azure/group8
-azure_rm_mysqlserver_facts
-azure_rm_mysqldatabase
-azure_rm_mysqldatabase_facts
-azure_rm_mysqlfirewallrule
-azure_rm_mysqlfirewallrule_facts
-azure_rm_mysqlconfiguration
-azure_rm_mysqlconfiguration_facts
diff --git a/test/integration/targets/azure_rm_mysqlserver/meta/main.yml b/test/integration/targets/azure_rm_mysqlserver/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_mysqlserver/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_mysqlserver/tasks/main.yml b/test/integration/targets/azure_rm_mysqlserver/tasks/main.yml
deleted file mode 100644
index 8f51010d82..0000000000
--- a/test/integration/targets/azure_rm_mysqlserver/tasks/main.yml
+++ /dev/null
@@ -1,640 +0,0 @@
-- name: Prepare random number
- set_fact:
- rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-- name: Create instance of MySQL Server -- check mode
- azure_rm_mysqlserver:
- resource_group: "{{ resource_group }}"
- name: mysqlsrv{{ rpfx }}
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 51200
- version: 5.6
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- check_mode: yes
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
-
-- name: Create instance of MySQL Server
- azure_rm_mysqlserver:
- resource_group: "{{ resource_group }}"
- name: mysqlsrv{{ rpfx }}
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 51200
- version: 5.6
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
- - output.state == 'Ready'
-
-- name: Create again instance of MySQL Server
- azure_rm_mysqlserver:
- resource_group: "{{ resource_group }}"
- name: mysqlsrv{{ rpfx }}
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 51200
- version: 5.6
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
- - output.state == 'Ready'
-
-- name: Update instance of MySQL Server, change storage size
- azure_rm_mysqlserver:
- resource_group: "{{ resource_group }}"
- name: mysqlsrv{{ rpfx }}
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 128000
- version: 5.6
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
- - output.state == 'Ready'
-- debug:
- var: output
-
-- name: Gather facts MySQL Server
- azure_rm_mysqlserver_facts:
- resource_group: "{{ resource_group }}"
- name: mysqlsrv{{ rpfx }}
- register: output
-- name: Assert that storage size is correct
- assert:
- that:
- - output.servers[0]['storage_mb'] == 128000
-
-- name: Create second instance of MySQL Server
- azure_rm_mysqlserver:
- resource_group: "{{ resource_group }}"
- name: mysqlsrv{{ rpfx }}second
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 51200
- version: 5.6
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- tags:
- aaa: bbb
-
-- name: Update tags on second instance of MySQL Server
- azure_rm_mysqlserver:
- resource_group: "{{ resource_group }}"
- name: mysqlsrv{{ rpfx }}second
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 51200
- version: 5.6
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- tags:
- ccc: ddd
-
-- name: Gather facts MySQL Server
- azure_rm_mysqlserver_facts:
- resource_group: "{{ resource_group }}"
- name: mysqlsrv{{ rpfx }}second
- register: output
-
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.servers[0]['id'] != None
- - output.servers[0]['name'] != None
- - output.servers[0]['location'] != None
- - output.servers[0]['sku']['name'] != None
- - output.servers[0]['sku']['tier'] != None
- - output.servers[0]['sku']['capacity'] != None
- - output.servers[0]['version'] != None
- - output.servers[0]['user_visible_state'] != None
- - output.servers[0]['fully_qualified_domain_name'] != None
- - output.servers[0]['tags']['aaa'] == 'bbb'
- - output.servers[0]['tags']['ccc'] == 'ddd'
-
-- name: Gather facts MySQL Server
- azure_rm_mysqlserver_facts:
- resource_group: "{{ resource_group }}"
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.servers[0]['id'] != None
- - output.servers[0]['name'] != None
- - output.servers[0]['location'] != None
- - output.servers[0]['sku']['name'] != None
- - output.servers[0]['sku']['tier'] != None
- - output.servers[0]['sku']['capacity'] != None
- - output.servers[0]['version'] != None
- - output.servers[0]['user_visible_state'] != None
- - output.servers[0]['fully_qualified_domain_name'] != None
- - output.servers[1]['id'] != None
- - output.servers[1]['name'] != None
- - output.servers[1]['location'] != None
- - output.servers[1]['sku']['name'] != None
- - output.servers[1]['sku']['tier'] != None
- - output.servers[1]['sku']['capacity'] != None
- - output.servers[1]['version'] != None
- - output.servers[1]['user_visible_state'] != None
- - output.servers[1]['fully_qualified_domain_name'] != None
-
-#
-# azure_rm_mysqldatabase tests below
-#
-- name: Create instance of MySQL Database -- check mode
- azure_rm_mysqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: testdatabase
- check_mode: yes
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
-
-- name: Create instance of MySQL Database
- azure_rm_mysqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: testdatabase
- collation: latin1_swedish_ci
- charset: latin1
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
- - output.name == 'testdatabase'
-
-- name: Create again instance of MySQL Database
- azure_rm_mysqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: testdatabase
- collation: latin1_swedish_ci
- charset: latin1
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
- - output.name == 'testdatabase'
-
-- name: Try to update database without force_update
- azure_rm_mysqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: testdatabase
- collation: latin1_czech_ci
- charset: latin1
- ignore_errors: yes
- register: output
-- name: Assert that nothing has changed
- assert:
- that:
- - output.changed == False
-
-- name: Update instance of database using force_update
- azure_rm_mysqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: testdatabase
- collation: latin1_czech_ci
- charset: latin1
- force_update: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
- - output.name == 'testdatabase'
-
-- name: Create second instance of MySQL Database
- azure_rm_mysqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: testdatabase2
-
-- name: Gather facts MySQL Database
- azure_rm_mysqldatabase_facts:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: testdatabase
- register: output
-
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.databases[0]['server_name'] != None
- - output.databases[0]['name'] != None
- - output.databases[0]['charset'] != None
- - output.databases[0]['collation'] != None
-
-- name: Gather facts MySQL Database
- azure_rm_mysqldatabase_facts:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.databases[0]['server_name'] != None
- - output.databases[0]['name'] != None
- - output.databases[0]['charset'] != None
- - output.databases[0]['collation'] != None
- - output.databases[1]['server_name'] != None
- - output.databases[1]['name'] != None
- - output.databases[1]['charset'] != None
- - output.databases[1]['collation'] != None
-
-- name: Delete instance of MySQL Database -- check mode
- azure_rm_mysqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: testdatabase
- state: absent
- check_mode: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete instance of MySQL Database
- azure_rm_mysqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: testdatabase
- state: absent
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete nonexistent instance of MySQL Database
- azure_rm_mysqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: testdatabase
- state: absent
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
-
-#
-# azure_rm_firewallrule tests below
-#
-- name: Create instance of Firewall Rule -- check mode
- azure_rm_mysqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- start_ip_address: 172.28.10.136
- end_ip_address: 172.28.10.138
- check_mode: yes
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
-
-- name: Create instance of Firewall Rule
- azure_rm_mysqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- start_ip_address: 172.28.10.136
- end_ip_address: 172.28.10.138
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
-
-- name: Create again instance of Firewall Rule
- azure_rm_mysqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- start_ip_address: 172.28.10.136
- end_ip_address: 172.28.10.138
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
-
-- name: Delete instance of Firewall Rule -- check mode
- azure_rm_mysqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- state: absent
- check_mode: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Create instance of Firewall Rule -- second
- azure_rm_mysqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}second
- start_ip_address: 172.28.10.136
- end_ip_address: 172.28.10.138
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Gather facts MySQL Firewall Rule
- azure_rm_mysqlfirewallrule_facts:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.rules[0].id != None
- - output.rules[0].server_name != None
- - output.rules[0].name != None
- - output.rules[0].start_ip_address != None
- - output.rules[0].end_ip_address != None
- - "output.rules | length == 1"
-
-- name: Gather facts MySQL Firewall Rule
- azure_rm_mysqlfirewallrule_facts:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.rules[0].id != None
- - output.rules[0].server_name != None
- - output.rules[0].name != None
- - output.rules[0].start_ip_address != None
- - output.rules[0].end_ip_address != None
- - output.rules[1].id != None
- - output.rules[1].name != None
- - output.rules[1].start_ip_address != None
- - output.rules[1].end_ip_address != None
- - "output.rules | length == 2"
-
-- name: Delete instance of Firewall Rule
- azure_rm_mysqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- state: absent
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete nonexistent instance of Firewall Rule
- azure_rm_mysqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- state: absent
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
-
-- name: Delete instance of Firewall Rule - second
- azure_rm_mysqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}second
- state: absent
-
-- name: Gather facts MySQL Firewall Rule
- azure_rm_mysqlfirewallrule_facts:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- register: output
-- name: Assert that empty list was returned
- assert:
- that:
- - output.changed == False
- - "output.rules | length == 0"
-
-#
-# configuration
-#
-- name: Create instance of Configuration -- check mode
- azure_rm_mysqlconfiguration:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: event_scheduler
- value: "ON"
- check_mode: yes
- register: output
-- name: Assert that change was registered
- assert:
- that:
- - output.changed
-
-- name: Gather facts of event_scheduler configuration
- azure_rm_mysqlconfiguration_facts:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: event_scheduler
- register: output
-- name: Show event_scheduler configuration facts
- debug:
- var: output
-
-- name: Try to delete default configuration
- azure_rm_mysqlconfiguration:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: event_scheduler
- state: absent
- register: output
-- name: Assert that change was not registered
- assert:
- that:
- - not output.changed
-
-- name: Try to change default configuration
- azure_rm_mysqlconfiguration:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: event_scheduler
- value: "ON"
- register: output
-- name: Assert that change was registered
- assert:
- that:
- - output.changed
-
-- name: Try to change default configuration -- idempotent
- azure_rm_mysqlconfiguration:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: event_scheduler
- value: "ON"
- register: output
-- name: Assert that change was not registered
- assert:
- that:
- - not output.changed
-
-- name: Try to reset configuration
- azure_rm_mysqlconfiguration:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: event_scheduler
- state: absent
- register: output
-- name: Assert that change was registered
- assert:
- that:
- - output.changed
-
-- name: Try to reset configuration -- idempotent
- azure_rm_mysqlconfiguration:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: event_scheduler
- state: absent
- register: output
-- name: Assert that change was not registered
- assert:
- that:
- - not output.changed
-
-- name: Gather facts MySQL Configuration
- azure_rm_mysqlconfiguration_facts:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- name: event_scheduler
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.settings[0].id != None
- - output.settings[0].name != None
- - output.settings[0].value != None
- - output.settings[0].description != None
- - output.settings[0].source != None
- - output.settings | length == 1
-
-- name: Gather facts MySQL Configuration
- azure_rm_mysqlconfiguration_facts:
- resource_group: "{{ resource_group }}"
- server_name: mysqlsrv{{ rpfx }}
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.settings[0].id != None
- - output.settings[0].name != None
- - output.settings[0].value != None
- - output.settings[0].description != None
- - output.settings[0].source != None
- - output.settings | length > 1
-
-#
-# clean up azure_rm_mysqlserver test
-#
-
-- name: Delete instance of MySQL Server -- check mode
- azure_rm_mysqlserver:
- resource_group: "{{ resource_group }}"
- name: mysqlsrv{{ rpfx }}
- state: absent
- check_mode: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete instance of MySQL Server
- azure_rm_mysqlserver:
- resource_group: "{{ resource_group }}"
- name: mysqlsrv{{ rpfx }}
- state: absent
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete nonexistent instance of MySQL Server
- azure_rm_mysqlserver:
- resource_group: "{{ resource_group }}"
- name: mysqlsrv{{ rpfx }}
- state: absent
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
-
-- name: Delete second instance of MySQL Server
- azure_rm_mysqlserver:
- resource_group: "{{ resource_group }}"
- name: mysqlsrv{{ rpfx }}second
- state: absent
- async: 400
- poll: 0
diff --git a/test/integration/targets/azure_rm_networkinterface/aliases b/test/integration/targets/azure_rm_networkinterface/aliases
deleted file mode 100644
index 67f8675677..0000000000
--- a/test/integration/targets/azure_rm_networkinterface/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group5
-destructive
-azure_rm_applicationsecuritygroup \ No newline at end of file
diff --git a/test/integration/targets/azure_rm_networkinterface/meta/main.yml b/test/integration/targets/azure_rm_networkinterface/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_networkinterface/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_networkinterface/tasks/main.yml b/test/integration/targets/azure_rm_networkinterface/tasks/main.yml
deleted file mode 100644
index bd7df0a29f..0000000000
--- a/test/integration/targets/azure_rm_networkinterface/tasks/main.yml
+++ /dev/null
@@ -1,554 +0,0 @@
-- name: Prepare random number
- set_fact:
- rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- applicationsecuritygroup_name1: "asg{{ resource_group | hash('md5') | truncate(5, True, '') }}{{ 1000 | random }}"
- applicationsecuritygroup_name2: "asg{{ resource_group | hash('md5') | truncate(5, True, '') }}{{ 1000 | random }}"
- nic_name1: "nic1{{ resource_group | hash('md5') | truncate(5, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-- name: Create virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group_secondary }}"
- name: "tn{{ rpfx }}"
- address_prefixes: "10.10.0.0/16"
- register: vn
-
-- name: Add subnet
- azure_rm_subnet:
- resource_group: "{{ resource_group_secondary }}"
- name: "tn{{ rpfx }}"
- address_prefix: "10.10.0.0/24"
- virtual_network: "tn{{ rpfx }}"
-
-- name: create public ip
- azure_rm_publicipaddress:
- name: "pip{{ rpfx }}"
- resource_group: '{{ resource_group }}'
-
-- name: create load balancer with multiple parameters
- azure_rm_loadbalancer:
- resource_group: '{{ resource_group }}'
- name: "lb{{ rpfx }}"
- frontend_ip_configurations:
- - name: frontendipconf0
- public_ip_address: "pip{{ rpfx }}"
- backend_address_pools:
- - name: backendaddrpool0
- - name: backendaddrpool1
- probes:
- - name: prob0
- port: 80
- inbound_nat_pools:
- - name: inboundnatpool0
- frontend_ip_configuration_name: frontendipconf0
- protocol: Tcp
- frontend_port_range_start: 80
- frontend_port_range_end: 81
- backend_port: 8080
- load_balancing_rules:
- - name: lbrbalancingrule0
- frontend_ip_configuration: frontendipconf0
- backend_address_pool: backendaddrpool0
- frontend_port: 80
- backend_port: 80
- probe: prob0
- register: lb
-
-- name: Create the simplest NIC with virtual_network id (check mode)
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "tn{{ rpfx }}"
- virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
- public_ip: False
- create_with_security_group: False
- register: output
- check_mode: yes
-
-- assert:
- that:
- - output.changed
-
-- name: Create the simplest NIC with virtual_network resource_group
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "tn{{ rpfx }}"
- virtual_network:
- name: "tn{{ rpfx }}"
- resource_group: "{{ resource_group_secondary }}"
- subnet: "tn{{ rpfx }}"
- public_ip: False
- create_with_security_group: False
- register: output
-
-- assert:
- that:
- - output.changed
- - output.state.id
-
-- name: Get facts of the newly created NIC
- azure_rm_networkinterface_info:
- resource_group: "{{ resource_group }}"
- name: "tn{{ rpfx }}"
- register: facts
-
-- assert:
- that:
- - "facts.networkinterfaces | length == 1"
- - facts.networkinterfaces[0].id == output.state.id
- - "facts.networkinterfaces[0].ip_configurations | length == 1"
- - not facts.networkinterfaces[0].security_group
- - not facts.networkinterfaces[0].ip_configurations[0].public_ip_address
- - not facts.networkinterfaces[0].enable_ip_forwarding
- - not facts.networkinterfaces[0].enable_accelerated_networking
-
-- name: Create the simplest NIC with ip configurations (idempotent)
- azure_rm_networkinterface:
- resource_group: "{{ facts.networkinterfaces[0].resource_group }}"
- name: "{{ facts.networkinterfaces[0].name }}"
- virtual_network: "{{ facts.networkinterfaces[0].virtual_network }}"
- create_with_security_group: False
- ip_configurations:
- - name: "{{ facts.networkinterfaces[0].ip_configurations[0].name }}"
- private_ip_address: "{{ facts.networkinterfaces[0].ip_configurations[0].private_ip_address }}"
- private_ip_allocation_method: "{{ facts.networkinterfaces[0].ip_configurations[0].private_ip_allocation_method }}"
- primary: "{{ facts.networkinterfaces[0].ip_configurations[0].primary }}"
- subnet: "{{ facts.networkinterfaces[0].subnet }}"
- register: output
-
-- assert:
- that:
- - not output.changed
-
-- name: Create the simplest NIC (idempotent)
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "tn{{ rpfx }}"
- virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
- create_with_security_group: False
- public_ip: False
- register: output
-
-- assert:
- that:
- - not output.changed
-
-- name: Update security group (check mode)
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "tn{{ rpfx }}"
- virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
- public_ip: False
- security_group: "tn{{ rpfx }}sg"
- register: output
- check_mode: yes
-
-- assert:
- that:
- - output.changed
-
-- name: Update public ip address (check mode)
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "tn{{ rpfx }}"
- virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
- public_ip_address_name: "tn{{ rpfx }}"
- create_with_security_group: False
- register: output
- check_mode: yes
-
-- assert:
- that:
- - output.changed
-
-- name: Update accelerated networking (check mode)
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "tn{{ rpfx }}"
- virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
- enable_accelerated_networking: True
- create_with_security_group: False
- public_ip: False
- register: output
- check_mode: yes
-
-- assert:
- that:
- - output.changed
-
-- name: Update IP forwarding networking (check mode)
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "tn{{ rpfx }}"
- virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
- create_with_security_group: False
- enable_ip_forwarding: True
- public_ip: False
- register: output
- check_mode: yes
-
-- assert:
- that:
- - output.changed
-
-- name: Update dns server (check mode)
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "tn{{ rpfx }}"
- virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
- create_with_security_group: False
- public_ip: False
- dns_servers:
- - 8.9.10.11
- - 7.8.9.10
- register: output
- check_mode: yes
-
-- assert:
- that:
- - output.changed
-
-- name: Update NIC
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "tn{{ rpfx }}"
- virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
- enable_accelerated_networking: True
- enable_ip_forwarding: True
- security_group: "tn{{ rpfx }}sg"
- dns_servers:
- - 8.9.10.11
- - 7.8.9.10
- ip_configurations:
- - name: "{{ facts.networkinterfaces[0].ip_configurations[0].name }}"
- private_ip_address: "{{ facts.networkinterfaces[0].ip_configurations[0].private_ip_address }}"
- private_ip_allocation_method: "{{ facts.networkinterfaces[0].ip_configurations[0].private_ip_allocation_method }}"
- primary: "{{ facts.networkinterfaces[0].ip_configurations[0].primary }}"
- - name: ipconfig1
- public_ip_name: "tn{{ rpfx }}"
- load_balancer_backend_address_pools:
- - "{{ lb.state.backend_address_pools[0].id }}"
- - name: backendaddrpool1
- load_balancer: "lb{{ rpfx }}"
- register: output
-
-- assert:
- that:
- - output.changed
- - output.state.dns_settings.dns_servers == ['8.9.10.11', '7.8.9.10']
- - output.state.enable_ip_forwarding
- - output.state.network_security_group.name == "tn{{ rpfx }}sg"
- - output.state.enable_accelerated_networking
-
-- name: Complicated NIC (idempotent)
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "tn{{ rpfx }}"
- virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
- enable_accelerated_networking: True
- security_group: "tn{{ rpfx }}sg"
- enable_ip_forwarding: True
- dns_servers:
- - 8.9.10.11
- - 7.8.9.10
- ip_configurations:
- - name: "{{ facts.networkinterfaces[0].ip_configurations[0].name }}"
- private_ip_address: "{{ facts.networkinterfaces[0].ip_configurations[0].private_ip_address }}"
- private_ip_allocation_method: "{{ facts.networkinterfaces[0].ip_configurations[0].private_ip_allocation_method }}"
- primary: "{{ facts.networkinterfaces[0].ip_configurations[0].primary }}"
- - name: ipconfig1
- public_ip_name: "tn{{ rpfx }}"
- load_balancer_backend_address_pools:
- - "{{ lb.state.backend_address_pools[0].id }}"
- - name: backendaddrpool1
- load_balancer: "lb{{ rpfx }}"
- register: output
-
-- assert:
- that:
- - not output.changed
-
-- name: Get facts of the newly created NIC
- azure_rm_networkinterface_info:
- resource_group: "{{ resource_group }}"
- name: "tn{{ rpfx }}"
- register: facts
-
-- assert:
- that:
- - "facts.networkinterfaces | length == 1"
- - facts.networkinterfaces[0].id == output.state.id
- - "facts.networkinterfaces[0].ip_configurations | length == 2"
- - 'facts.networkinterfaces[0].security_group.endswith("tn{{ rpfx }}sg")'
- - facts.networkinterfaces[0].enable_accelerated_networking
- - facts.networkinterfaces[0].enable_ip_forwarding
-
-- name: Remove one dns server and ip configuration
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "tn{{ rpfx }}"
- virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
- security_group: "tn{{ rpfx }}sg"
- enable_accelerated_networking: True
- enable_ip_forwarding: True
- dns_servers:
- - 8.9.10.11
- ip_configurations:
- - name: ipconfig1
- public_ip_name: "tn{{ rpfx }}"
- primary: True
- load_balancer_backend_address_pools:
- - "{{ lb.state.backend_address_pools[0].id }}"
- - name: backendaddrpool1
- load_balancer: "lb{{ rpfx }}"
- register: output
-
-- assert:
- that:
- - output.changed
- - output.state.dns_settings.dns_servers == ['8.9.10.11']
- - output.state.enable_ip_forwarding
- - output.state.network_security_group.name == "tn{{ rpfx }}sg"
- - "output.state.ip_configurations | length == 1"
- - output.state.ip_configurations[0].public_ip_address.name == "tn{{ rpfx }}"
- - output.state.enable_accelerated_networking
-
-- name: Create application security group (check mode)
- azure_rm_applicationsecuritygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ applicationsecuritygroup_name1 }}"
- tags:
- testing: testing
- check_mode: yes
- register: output
-
-- name: Assert check mode creation
- assert:
- that:
- - output.changed
-
-- name: Create Application security group
- azure_rm_applicationsecuritygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ applicationsecuritygroup_name1 }}"
- tags:
- testing: testing
- register: output
-
-- name: Assert application security group creation
- assert:
- that:
- - output.changed
- - output.id != ''
-
-- name: Get Application security group
- azure_rm_applicationsecuritygroup_info:
- resource_group: "{{ resource_group }}"
- name: "{{ applicationsecuritygroup_name1 }}"
- register: facts
-
-- name: Assert facts
- assert:
- that:
- - facts['applicationsecuritygroups'] | length == 1
- - facts['applicationsecuritygroups'][0]['name'] != None
- - facts['applicationsecuritygroups'][0]['location'] != None
- - facts['applicationsecuritygroups'][0]['provisioning_state'] != None
-
-- name: Create application security group (idempotent)
- azure_rm_applicationsecuritygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ applicationsecuritygroup_name1 }}"
- tags:
- testing: testing
- register: output
-
-- name: Assert idempotent
- assert:
- that:
- - not output.changed
-
-- name: Update application security group
- azure_rm_applicationsecuritygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ applicationsecuritygroup_name1 }}"
- tags:
- testing: testing
- foo: bar
- register: output
-
-- name: Assert update
- assert:
- that:
- - output.changed
-
-- name: Create Application security group in secondary resource group
- azure_rm_applicationsecuritygroup:
- resource_group: "{{ resource_group_secondary }}"
- name: "{{ applicationsecuritygroup_name2 }}"
- register: asg
-
-- name: Create Nic with application security groups
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "{{ nic_name1 }}"
- virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
- create_with_security_group: True
- public_ip: False
- ip_configurations:
- - name: ipconfig1
- application_security_groups:
- - "{{ applicationsecuritygroup_name1 }}"
- - "{{ asg.id }}"
- primary: True
- register: output
-
-- name: assert creation succeeded
- assert:
- that:
- - output.changed
-
-- name: Create Nic with application security groups (idempotent)
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "{{ nic_name1 }}"
- virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
- create_with_security_group: True
- public_ip: False
- ip_configurations:
- - name: ipconfig1
- application_security_groups:
- - "{{ asg.id }}"
- - "{{ applicationsecuritygroup_name1 }}"
- primary: True
- register: output
-
-- name: assert idempotent
- assert:
- that:
- - not output.changed
-
-- name: Update Nic with application security groups
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "{{ nic_name1 }}"
- virtual_network: "{{ vn.state.id }}"
- subnet: "tn{{ rpfx }}"
- create_with_security_group: True
- public_ip: False
- ip_configurations:
- - name: ipconfig1
- application_security_groups:
- - "{{ applicationsecuritygroup_name1 }}"
- primary: True
- register: output
-
-- name: assert update succeeded
- assert:
- that:
- - output.changed
-
-- name: Get facts of the newly created NIC
- azure_rm_networkinterface_info:
- resource_group: "{{ resource_group }}"
- name: "{{ nic_name1 }}"
- register: facts
-
-- assert:
- that:
- - "facts.networkinterfaces[0].ip_configurations[0].application_security_groups | length == 1"
-
-- name: Delete the NIC (check mode)
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "tn{{ rpfx }}"
- state: absent
- check_mode: yes
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Delete the NIC
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "tn{{ rpfx }}"
- state: absent
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Delete the NIC (idempotent)
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "tn{{ rpfx }}"
- state: absent
- register: output
-
-- assert:
- that:
- - not output.changed
-
-- name: delete load balancer
- azure_rm_loadbalancer:
- resource_group: '{{ resource_group }}'
- name: "lb{{ rpfx }}"
- state: absent
-
-- name: delete public ip
- azure_rm_publicipaddress:
- name: "pip{{ rpfx }}"
- resource_group: '{{ resource_group }}'
- state: absent
-
-- name: Delete the NIC
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "{{ nic_name1 }}"
- state: absent
-
-- name: Delete the application security group (check mode)
- azure_rm_applicationsecuritygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ applicationsecuritygroup_name1 }}"
- state: absent
- check_mode: yes
- register: output
-
-- name: Assert delete check mode
- assert:
- that:
- - output.changed
-
-- name: Delete the application security group
- azure_rm_applicationsecuritygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ applicationsecuritygroup_name1 }}"
- state: absent
- register: output
-
-- name: Assert the deletion
- assert:
- that:
- - output.changed
-
-- name: Delete second application security group
- azure_rm_applicationsecuritygroup:
- resource_group: "{{ resource_group_secondary }}"
- name: "{{ applicationsecuritygroup_name2 }}"
- state: absent
- register: output \ No newline at end of file
diff --git a/test/integration/targets/azure_rm_postgresqlserver/aliases b/test/integration/targets/azure_rm_postgresqlserver/aliases
deleted file mode 100644
index ad065181b3..0000000000
--- a/test/integration/targets/azure_rm_postgresqlserver/aliases
+++ /dev/null
@@ -1,10 +0,0 @@
-cloud/azure
-destructive
-shippable/azure/group11
-azure_rm_postgresqlserver_facts
-azure_rm_postgresqldatabase
-azure_rm_postgresqldatabase_facts
-azure_rm_postgresqlfirewallrule
-azure_rm_postgresqlfirewallrule_facts
-azure_rm_postgresqlserverconfiguration
-azure_rm_postgresqlserverconfiguration_facts
diff --git a/test/integration/targets/azure_rm_postgresqlserver/meta/main.yml b/test/integration/targets/azure_rm_postgresqlserver/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_postgresqlserver/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_postgresqlserver/tasks/main.yml b/test/integration/targets/azure_rm_postgresqlserver/tasks/main.yml
deleted file mode 100644
index 9464e5d5bf..0000000000
--- a/test/integration/targets/azure_rm_postgresqlserver/tasks/main.yml
+++ /dev/null
@@ -1,610 +0,0 @@
-- name: Prepare random number
- set_fact:
- rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-- name: Create instance of PostgreSQL Server -- check mode
- azure_rm_postgresqlserver:
- resource_group: "{{ resource_group }}"
- name: postgresqlsrv{{ rpfx }}
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 51200
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- check_mode: yes
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
-
-- name: Create instance of PostgreSQL Server
- azure_rm_postgresqlserver:
- resource_group: "{{ resource_group }}"
- name: postgresqlsrv{{ rpfx }}
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 51200
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
- - output.state == 'Ready'
-
-- name: Create again instance of PostgreSQL Server
- azure_rm_postgresqlserver:
- resource_group: "{{ resource_group }}"
- name: postgresqlsrv{{ rpfx }}
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 51200
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
- - output.state == 'Ready'
-
-- name: Update instance of PostgreSQL Server, change storage size
- azure_rm_postgresqlserver:
- resource_group: "{{ resource_group }}"
- name: postgresqlsrv{{ rpfx }}
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 128000
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
- - output.state == 'Ready'
-- debug:
- var: output
-
-- name: Gather facts PostgreSQL Server
- azure_rm_postgresqlserver_facts:
- resource_group: "{{ resource_group }}"
- name: postgresqlsrv{{ rpfx }}
- register: output
-- name: Assert that storage size is correct
- assert:
- that:
- - output.servers[0]['storage_mb'] == 128000
-
-- name: Create second instance of PostgreSQL Server
- azure_rm_postgresqlserver:
- resource_group: "{{ resource_group }}"
- name: postgresqlsrv{{ rpfx }}second
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 51200
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- tags:
- aaa: bbb
-
-- name: Create second instance of PostgreSQL Server -- add tags
- azure_rm_postgresqlserver:
- resource_group: "{{ resource_group }}"
- name: postgresqlsrv{{ rpfx }}second
- sku:
- name: B_Gen5_1
- tier: Basic
- location: westus2
- storage_mb: 51200
- enforce_ssl: True
- admin_username: zimxyz
- admin_password: Testpasswordxyz12!
- tags:
- ccc: ddd
-
-- name: Gather facts PostgreSQL Server
- azure_rm_postgresqlserver_facts:
- resource_group: "{{ resource_group }}"
- name: postgresqlsrv{{ rpfx }}second
- register: output
-
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.servers[0]['id'] != None
- - output.servers[0]['name'] != None
- - output.servers[0]['location'] != None
- - output.servers[0]['sku']['name'] != None
- - output.servers[0]['sku']['tier'] != None
- - output.servers[0]['sku']['capacity'] != None
- - output.servers[0]['version'] != None
- - output.servers[0]['user_visible_state'] != None
- - output.servers[0]['fully_qualified_domain_name'] != None
- - output.servers[0]['tags']['aaa'] == 'bbb'
- - output.servers[0]['tags']['ccc'] == 'ddd'
-
-- name: Gather facts PostgreSQL Server
- azure_rm_postgresqlserver_facts:
- resource_group: "{{ resource_group }}"
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.servers[0]['id'] != None
- - output.servers[0]['name'] != None
- - output.servers[0]['location'] != None
- - output.servers[0]['sku']['name'] != None
- - output.servers[0]['sku']['tier'] != None
- - output.servers[0]['sku']['capacity'] != None
- - output.servers[0]['version'] != None
- - output.servers[0]['user_visible_state'] != None
- - output.servers[0]['fully_qualified_domain_name'] != None
- - output.servers[1]['id'] != None
- - output.servers[1]['name'] != None
- - output.servers[1]['location'] != None
- - output.servers[1]['sku']['name'] != None
- - output.servers[1]['sku']['tier'] != None
- - output.servers[1]['sku']['capacity'] != None
- - output.servers[1]['version'] != None
- - output.servers[1]['user_visible_state'] != None
- - output.servers[1]['fully_qualified_domain_name'] != None
-
-#
-# azure_rm_postgresqldatabase tests below
-#
-- name: Create instance of PostgreSQL Database -- check mode
- azure_rm_postgresqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: testdatabase
- charset: UTF8
- collation: English_United States.1252
- check_mode: yes
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
-
-- name: Create instance of PostgreSQL Database
- azure_rm_postgresqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: testdatabase
- charset: UTF8
- collation: English_United States.1252
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
- - output.name == 'testdatabase'
-
-- name: Create again instance of PostgreSQL Database
- azure_rm_postgresqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: testdatabase
- charset: UTF8
- collation: English_United States.1252
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
- - output.name == 'testdatabase'
-
-- name: Try to update PostgreSQL Database without force_update
- azure_rm_postgresqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: testdatabase
- charset: WIN1252
- collation: SQL_Latin1_General_CP1_CS_AS
- ignore_errors: yes
- register: output
-- name: Assert that nothing has changed
- assert:
- that:
- - output.changed == False
-
-- name: Try to update PostgreSQL Database with force_update
- azure_rm_postgresqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: testdatabase
- charset: WIN1252
- collation: SQL_Latin1_General_CP1_CS_AS
- force_update: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
- - output.name == 'testdatabase'
-
-- name: Create second instance of PostgreSQL Database
- azure_rm_postgresqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: testdatabase2
-
-- name: Gather facts PostgreSQL Database
- azure_rm_postgresqldatabase_facts:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: testdatabase
- register: output
-
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.databases[0]['server_name'] != None
- - output.databases[0]['name'] != None
- - output.databases[0]['charset'] != None
- - output.databases[0]['collation'] != None
-
-- name: Gather facts PostgreSQL Database
- azure_rm_postgresqldatabase_facts:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.databases[0]['server_name'] != None
- - output.databases[0]['name'] != None
- - output.databases[0]['charset'] != None
- - output.databases[0]['collation'] != None
- - output.databases[1]['server_name'] != None
- - output.databases[1]['name'] != None
- - output.databases[1]['charset'] != None
- - output.databases[1]['collation'] != None
-
-- name: Delete instance of PostgreSQL Database -- check mode
- azure_rm_postgresqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: testdatabase
- state: absent
- check_mode: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete instance of PostgreSQL Database
- azure_rm_postgresqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: testdatabase
- state: absent
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete nonexistent instance of PostgreSQL Database
- azure_rm_postgresqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: testdatabase
- state: absent
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
-
-#
-# azure_rm_postgresqlfirewallrule
-#
-
-- name: Create instance of Firewall Rule -- check mode
- azure_rm_postgresqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- start_ip_address: 172.28.10.136
- end_ip_address: 172.28.10.138
- check_mode: yes
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
-
-- name: Create instance of Firewall Rule
- azure_rm_postgresqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- start_ip_address: 172.28.10.136
- end_ip_address: 172.28.10.138
- register: output
-- name: Assert the resource instance is well created
- assert:
- that:
- - output.changed
-
-- name: Create again instance of Firewall Rule
- azure_rm_postgresqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- start_ip_address: 172.28.10.136
- end_ip_address: 172.28.10.138
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
-
-- name: Create Firewall Rule - second
- azure_rm_postgresqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}second
- start_ip_address: 172.28.10.136
- end_ip_address: 172.28.10.138
-
-- name: Gather facts PostgreSQL Firewall Rule
- azure_rm_postgresqlfirewallrule_facts:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.rules[0].id != None
- - output.rules[0].server_name != None
- - output.rules[0].name != None
- - output.rules[0].start_ip_address != None
- - output.rules[0].end_ip_address != None
- - "output.rules | length == 1"
-
-- name: Gather facts PostgreSQL Firewall Rule
- azure_rm_postgresqlfirewallrule_facts:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.rules[0].id != None
- - output.rules[0].server_name != None
- - output.rules[0].name != None
- - output.rules[0].start_ip_address != None
- - output.rules[0].end_ip_address != None
- - output.rules[1].id != None
- - output.rules[1].name != None
- - output.rules[1].start_ip_address != None
- - output.rules[1].end_ip_address != None
- - "output.rules | length == 2"
-
-- name: Delete instance of Firewall Rule -- check mode
- azure_rm_postgresqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- state: absent
- check_mode: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete instance of Firewall Rule
- azure_rm_postgresqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- state: absent
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete nonexistent instance of Firewall Rule
- azure_rm_postgresqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- state: absent
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
-
-- name: Delete instance of Firewall Rule - second
- azure_rm_postgresqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}second
- state: absent
-
-- name: Gather facts PostgreSQL Firewall Rule
- azure_rm_postgresqlfirewallrule_facts:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: firewallrule{{ rpfx }}
- register: output
-- name: Assert that an empty list was returned
- assert:
- that:
- - output.changed == False
- - "output.rules | length == 0"
-
-#
-# azure_rm_postgresql_configuration
-#
-- name: Create instance of Configuration -- check mode
- azure_rm_postgresqlconfiguration:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: deadlock_timeout
- value: 2000
- check_mode: yes
- register: output
-- name: Assert that change was registered
- assert:
- that:
- - output.changed
-
-- name: Try to change default configuration
- azure_rm_postgresqlconfiguration:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: deadlock_timeout
- value: 2000
- register: output
-- name: Assert that change was registered
- assert:
- that:
- - output.changed
-
-- name: Try to change default configuration -- idempotent
- azure_rm_postgresqlconfiguration:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: deadlock_timeout
- value: 2000
- register: output
-- name: Assert that change was not registered
- assert:
- that:
- - not output.changed
-
-- name: Try to reset configuration
- azure_rm_postgresqlconfiguration:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: deadlock_timeout
- state: absent
- register: output
-- name: Assert that change was registered
- assert:
- that:
- - output.changed
-
-- name: Try to reset configuration -- idempotent
- azure_rm_postgresqlconfiguration:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: deadlock_timeout
- state: absent
- register: output
-- name: Assert that change was not registered
- assert:
- that:
- - not output.changed
-
-- name: Gather facts PostgreSQL Configuration
- azure_rm_postgresqlconfiguration_facts:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- name: deadlock_timeout
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.settings[0].id != None
- - output.settings[0].name != None
- - output.settings[0].value != None
- - output.settings[0].description != None
- - output.settings[0].source != None
- - output.settings | length == 1
-
-- name: Gather facts PostgreSQL Configuration
- azure_rm_postgresqlconfiguration_facts:
- resource_group: "{{ resource_group }}"
- server_name: postgresqlsrv{{ rpfx }}
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.settings[0].id != None
- - output.settings[0].name != None
- - output.settings[0].value != None
- - output.settings[0].description != None
- - output.settings[0].source != None
- - output.settings | length > 1
-
-#
-# azure_rm_postgresqlserver continuation / clean up
-#
-
-- name: Delete instance of PostgreSQL Server -- check mode
- azure_rm_postgresqlserver:
- resource_group: "{{ resource_group }}"
- name: postgresqlsrv{{ rpfx }}
- state: absent
- check_mode: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete instance of PostgreSQL Server
- azure_rm_postgresqlserver:
- resource_group: "{{ resource_group }}"
- name: postgresqlsrv{{ rpfx }}
- state: absent
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete nonexistent instance of PostgreSQL Server
- azure_rm_postgresqlserver:
- resource_group: "{{ resource_group }}"
- name: postgresqlsrv{{ rpfx }}
- state: absent
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
-
-- name: Delete second instance of PostgreSQL Server
- azure_rm_postgresqlserver:
- resource_group: "{{ resource_group }}"
- name: postgresqlsrv{{ rpfx }}second
- state: absent
- async: 400
- poll: 0
diff --git a/test/integration/targets/azure_rm_publicipaddress/aliases b/test/integration/targets/azure_rm_publicipaddress/aliases
deleted file mode 100644
index 7b5d7f9851..0000000000
--- a/test/integration/targets/azure_rm_publicipaddress/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group2
-destructive
-azure_rm_publicipaddress_facts
diff --git a/test/integration/targets/azure_rm_publicipaddress/meta/main.yml b/test/integration/targets/azure_rm_publicipaddress/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_publicipaddress/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_publicipaddress/tasks/main.yml b/test/integration/targets/azure_rm_publicipaddress/tasks/main.yml
deleted file mode 100644
index a5f929b0a3..0000000000
--- a/test/integration/targets/azure_rm_publicipaddress/tasks/main.yml
+++ /dev/null
@@ -1,113 +0,0 @@
-- name: Create domain name
- set_fact:
- domain_name: "ansible-{{ resource_group | hash('md5') | truncate(24, True, '') }}"
- rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
-
-- name: Remove public ip
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- name: "pip{{ rpfx }}"
- state: absent
-
-- name: Create public ip
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- name: "pip{{ rpfx }}"
- allocation_method: Static
- domain_name: "{{ domain_name }}"
- tags:
- testing: testing
- delete: on-exit
- register: output
-
-- assert:
- that:
- - output.state.public_ip_allocation_method == 'static'
- - output.state.dns_settings.domain_name_label == domain_name
- - output.state.tags | length == 2
- - output.state.tags.testing == 'testing'
-
-- name: Should be idempotent
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- name: "pip{{ rpfx }}"
- allocation_method: static
- domain_name: "{{ domain_name }}"
- register: output
-
-- assert:
- that: not output.changed
-
-- name: Update tags
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- name: "pip{{ rpfx }}"
- allocation_method: static
- domain_name: "{{ domain_name }}"
- append_tags: yes
- tags:
- delete: never
- foo: bar
- register: output
-
-- assert:
- that:
- - output.state.tags | length == 3
- - output.state.tags.delete == 'never'
-
-- name: Gather facts, filtering by tag
- azure_rm_publicipaddress_facts:
- resource_group: "{{ resource_group }}"
- tags:
- - testing
- - foo:bar
-
-- assert:
- that: azure_publicipaddresses | length == 1
-
-- name: Purge all tags
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- name: "pip{{ rpfx }}"
- allocation_method: static
- domain_name: "{{ domain_name }}"
- append_tags: no
- register: output
-
-- assert:
- that:
- - output.state.tags | length == 0
-
-- name: Gather facts for a public ip
- azure_rm_publicipaddress_facts:
- resource_group: "{{ resource_group }}"
- name: "pip{{ rpfx }}"
- register: pip
-
-- assert:
- that:
- - "pip.publicipaddresses | length == 1"
- - pip.publicipaddresses[0].name == "pip{{ rpfx }}"
- - pip.publicipaddresses[0].allocation_method == 'static'
- - pip.publicipaddresses[0].dns_settings.domain_name_label == domain_name
-
-- name: Gather facts for all public ips
- azure_rm_publicipaddress_facts:
- resource_group: "{{ resource_group }}"
-
-- assert:
- that: azure_publicipaddresses | length > 0
-
-- name: Remove public ip
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- name: "pip{{ rpfx }}"
- state: absent
-
-- name: Gather facts for a public ip
- azure_rm_publicipaddress_facts:
- resource_group: "{{ resource_group }}"
- name: "pip{{ rpfx }}"
-
-- assert:
- that: azure_publicipaddresses | length == 0
diff --git a/test/integration/targets/azure_rm_rediscache/aliases b/test/integration/targets/azure_rm_rediscache/aliases
deleted file mode 100644
index 09552ffcbb..0000000000
--- a/test/integration/targets/azure_rm_rediscache/aliases
+++ /dev/null
@@ -1,5 +0,0 @@
-cloud/azure
-shippable/azure/group2
-destructive
-azure_rm_rediscache_facts
-azure_rm_rediscachefirewallrule
diff --git a/test/integration/targets/azure_rm_rediscache/meta/main.yml b/test/integration/targets/azure_rm_rediscache/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_rediscache/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_rediscache/tasks/main.yml b/test/integration/targets/azure_rm_rediscache/tasks/main.yml
deleted file mode 100644
index dc0c61f39f..0000000000
--- a/test/integration/targets/azure_rm_rediscache/tasks/main.yml
+++ /dev/null
@@ -1,317 +0,0 @@
-- name: Fix resource prefix
- set_fact:
- redis_name: "redis-{{ resource_group | hash('md5') | truncate(7, True, '') }}-{{ 1000 | random }}"
- vnet_name: "vnet-{{ resource_group | hash('md5') | truncate(7, True, '') }}-{{ 1000 | random }}"
- subnet_name: "subnet-{{ resource_group | hash('md5') | truncate(7, True, '') }}-{{ 1000 | random }}"
- rule_name: "rule1"
- run_once: yes
-
-- name: Create a redis cache (Check Mode)
- azure_rm_rediscache:
- resource_group: "{{ resource_group }}"
- name: "{{ redis_name }}"
- sku:
- name: basic
- size: C1
- wait_for_provisioning: False
- check_mode: yes
- register: output
-
-- name: Assert creating redis cache check mode
- assert:
- that:
- - output.changed
-
-- name: Create a redis cache
- azure_rm_rediscache:
- resource_group: "{{ resource_group }}"
- name: "{{ redis_name }}"
- sku:
- name: basic
- size: C1
- wait_for_provisioning: False
- register: output
-
-- name: Assert creating redis cache
- assert:
- that:
- - output.changed
- - output.id
-
-- name: Get facts
- azure_rm_rediscache_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ redis_name }}"
- register: facts
-
-- name: Assert facts
- assert:
- that:
- - facts.rediscaches | length == 1
- - facts.rediscaches[0].id != None
- - facts.rediscaches[0].host_name != None
- - facts.rediscaches[0].provisioning_state != None
- - facts.rediscaches[0].sku.name == 'basic'
- - facts.rediscaches[0].sku.size == 'C1'
-
-- name: Update the redis cache (idempotent)
- azure_rm_rediscache:
- resource_group: "{{ resource_group }}"
- name: "{{ redis_name }}"
- sku:
- name: basic
- size: C1
- wait_for_provisioning: False
- register: output
-
-- name: assert output not changed
- assert:
- that:
- - not output.changed
-
-- name: long-running rediscache tests [run with `--tags long_run,untagged` to enable]
-  # Creating a Redis cache is an async operation that takes about 20 minutes; the
-  # provisioning state must be polled from Creating to Running before any update or
-  # delete operation can be performed, otherwise the API returns the error:
- # "The resource '<resource_id>' is busy processing a previous update request or is undergoing system maintenance.
- # As such, it is currently unable to accept the update request. Please try again later."
- block:
- - name: Wait for Redis provisioning to complete
- azure_rm_rediscache_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ redis_name }}"
- register: facts
- until: facts.rediscaches[0]['provisioning_state'] == 'Succeeded'
- retries: 30
- delay: 60
-
- - name: (actually) update redis cache
- azure_rm_rediscache:
- resource_group: "{{ resource_group }}"
- name: "{{ redis_name }}"
- sku:
- name: basic
- size: C1
- enable_non_ssl_port: true
- tags:
- testing: foo
- wait_for_provisioning: True
- register: output
-
- - name: assert output changed
- assert:
- that:
- - output.changed
-
- - name: Update redis cache configuration
- azure_rm_rediscache:
- resource_group: "{{ resource_group }}"
- name: "{{ redis_name }}"
- sku:
- name: basic
- size: C1
- enable_non_ssl_port: True
- maxmemory_policy: allkeys_lru
- tags:
- testing: foo
- register: output
-
- - name: assert output changed
- assert:
- that:
- - output.changed
-
- - name: Scale up the redis cache
- azure_rm_rediscache:
- resource_group: "{{ resource_group }}"
- name: "{{ redis_name }}"
- sku:
- name: standard
- size: C1
- tags:
- testing: foo
- wait_for_provisioning: True
- register: output
-
- - assert:
- that:
- - output.changed
-
- - name: Force reboot redis cache
- azure_rm_rediscache:
- resource_group: "{{ resource_group }}"
- name: "{{ redis_name }}"
- reboot:
- reboot_type: all
- register: output
-
- - assert:
- that:
- - output.changed
-
- - name: Delete the redis cache (Check Mode)
- azure_rm_rediscache:
- resource_group: "{{ resource_group }}"
- name: "{{ redis_name }}"
- state: absent
- check_mode: yes
- register: output
-
- - name: assert deleting redis cache check mode
- assert:
- that: output.changed
-
- - name: Delete the redis cache
- azure_rm_rediscache:
- resource_group: "{{ resource_group }}"
- name: "{{ redis_name }}"
- state: absent
- register: output
-
- - assert:
- that:
- - output.changed
- tags: [long_run, never]
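The comment at the top of this block explains that the cache facts are polled until provisioning reaches Succeeded before any update or delete is attempted. As a purely illustrative alternative (not part of the original test), the update task itself could be retried until the busy/conflict error clears; the module arguments below mirror the tasks above and the retry cadence is arbitrary:

- name: Update redis cache, retrying while the resource is still provisioning (illustrative)
  azure_rm_rediscache:
    resource_group: "{{ resource_group }}"
    name: "{{ redis_name }}"
    sku:
      name: basic
      size: C1
    enable_non_ssl_port: true
  register: output
  until: output is succeeded   # failed attempts are retried until the busy/conflict error clears
  retries: 30
  delay: 60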
-
-
-- name: Create virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: "{{ vnet_name }}"
- address_prefixes: "10.10.0.0/16"
-
-- name: Add subnet
- azure_rm_subnet:
- resource_group: "{{ resource_group }}"
- name: "{{ subnet_name }}"
- address_prefix: "10.10.0.0/24"
- virtual_network: "{{ vnet_name }}"
-
-- name: Create redis with subnet
- azure_rm_rediscache:
- resource_group: "{{ resource_group }}"
- name: "{{ redis_name }}2"
- sku:
- name: premium
- size: P1
- subnet:
- name: "{{ subnet_name }}"
- virtual_network_name: "{{ vnet_name }}"
- wait_for_provisioning: False
- register: output
-
-- name: Assert creating redis cache
- assert:
- that:
- - output.changed
- - output.id
-
-- name: Get facts
- azure_rm_rediscache_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ redis_name }}2"
- return_access_keys: True
- register: facts
-
-- name: Assert facts
- assert:
- that:
- - facts.rediscaches | length == 1
- - facts.rediscaches[0].subnet != None
- - facts.rediscaches[0].access_keys.primary != None
-
-- name: Create firewall rule (Check mode)
- azure_rm_rediscachefirewallrule:
- resource_group: "{{ resource_group }}"
- cache_name: "{{ redis_name }}2"
- name: "{{ rule_name }}"
- start_ip_address: 192.168.1.1
- end_ip_address: 192.168.1.4
- check_mode: yes
- register: output
-
-- name: Assert check mode creation
- assert:
- that:
- - output.changed
-
-
-- name: long-running rediscachefirewallrule tests [run with `--tags long_run,untagged` to enable]
-# Creating a firewall rule requires the Redis cache to be in the Running state, but creating a Redis cache
-# is an async operation that takes about 20 minutes; the provisioning state must be polled from Creating to
-# Running before the firewall rule can be created, otherwise the API returns the error:
-# "Error creating/updating Firewall rule of Azure Cache for Redis: Azure Error: Conflict\nMessage: The resource
-# '/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Cache/Redis/myRedis' is busy processing
-# a previous update request or is undergoing system maintenance. As such, it is currently unable to accept the update request. Please try again later."
- block:
- - name: Wait for Redis provisioning to complete
- azure_rm_rediscache_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ redis_name }}2"
- register: facts
- until: facts.rediscaches[0]['provisioning_state'] == 'Succeeded'
- retries: 30
- delay: 60
-
- - name: Create firewall rule
- azure_rm_rediscachefirewallrule:
- resource_group: "{{ resource_group }}"
- cache_name: "{{ redis_name }}2"
- name: "{{ rule_name }}"
- start_ip_address: 192.168.1.1
- end_ip_address: 192.168.1.4
- register: output
-
- - name: Assert creation
- assert:
- that:
- - output.changed
- - output.id
-
- - name: Update firewall rule idempotence
- azure_rm_rediscachefirewallrule:
- resource_group: "{{ resource_group }}"
- cache_name: "{{ redis_name }}2"
- name: "{{ rule_name }}"
- start_ip_address: 192.168.1.1
- end_ip_address: 192.168.1.4
- register: output
-
- - name: Assert idempotence
- assert:
- that:
- - output.changed == False
-
- - name: Update firewall rule
- azure_rm_rediscachefirewallrule:
- resource_group: "{{ resource_group }}"
- cache_name: "{{ redis_name }}2"
- name: "{{ rule_name }}"
- end_ip_address: 192.168.1.5
- register: output
-
- - name: Assert updating
- assert:
- that:
- - output.changed
-
- - name: Delete firewall rule
- azure_rm_rediscachefirewallrule:
- resource_group: "{{ resource_group }}"
- cache_name: "{{ redis_name }}2"
- name: "{{ rule_name }}"
- state: absent
- register: output
-
- - name: Assert deletion
- assert:
- that:
- - output.changed
-
- - name: Delete the redis cache
- azure_rm_rediscache:
- resource_group: "{{ resource_group }}"
- name: "{{ redis_name }}2"
- state: absent
- register: output
- tags: [long_run, never] \ No newline at end of file
diff --git a/test/integration/targets/azure_rm_resource/aliases b/test/integration/targets/azure_rm_resource/aliases
deleted file mode 100644
index 49acfee76c..0000000000
--- a/test/integration/targets/azure_rm_resource/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/azure
-destructive
-shippable/azure/group2
diff --git a/test/integration/targets/azure_rm_resource/meta/main.yml b/test/integration/targets/azure_rm_resource/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_resource/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_resource/tasks/main.yml b/test/integration/targets/azure_rm_resource/tasks/main.yml
deleted file mode 100644
index 7c3024a5ef..0000000000
--- a/test/integration/targets/azure_rm_resource/tasks/main.yml
+++ /dev/null
@@ -1,158 +0,0 @@
-- name: Prepare random number
- set_fact:
- nsgname: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- storageaccountname: "stacc{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- dbname: "mdb{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-- name: Call REST API
- azure_rm_resource:
- api_version: '2018-02-01'
- resource_group: "{{ resource_group }}"
- provider: network
- resource_type: networksecuritygroups
- resource_name: "{{ nsgname }}"
- body:
- location: eastus
- idempotency: yes
- register: output
-
-- name: Assert that something has changed
- assert:
- that: output.changed
-
-- name: Call REST API
- azure_rm_resource:
- api_version: '2018-02-01'
- resource_group: "{{ resource_group }}"
- provider: network
- resource_type: networksecuritygroups
- resource_name: "{{ nsgname }}"
- body:
- location: eastus
- idempotency: yes
- register: output
-
-- name: Assert that nothing has changed
- assert:
- that: not output.changed
-
-- name: Call REST API
- azure_rm_resource:
- api_version: '2018-02-01'
- resource_group: "{{ resource_group }}"
- provider: network
- resource_type: networksecuritygroups
- resource_name: "{{ nsgname }}"
- body:
- location: eastus
- tags:
- a: "abc"
- b: "cde"
- idempotency: yes
- register: output
-
-- name: Assert that something has changed
- assert:
- that: output.changed
-
-- name: Try to get information about the network security group
- azure_rm_resource_facts:
- api_version: '2018-02-01'
- resource_group: "{{ resource_group }}"
- provider: network
- resource_type: networksecuritygroups
- resource_name: "{{ nsgname }}"
- register: output
-
-- name: Assert value was returned
- assert:
- that:
- - not output.changed
- - output.response[0]['name'] != None
- - output.response | length == 1
-
-- name: Try to query a list
- azure_rm_resource_facts:
- api_version: '2018-02-01'
- resource_group: "{{ resource_group }}"
- provider: network
- resource_type: networksecuritygroups
- register: output
-- name: Assert value was returned
- assert:
- that:
- - not output.changed
- - output.response[0]['name'] != None
- - output.response | length >= 1
-
-- name: Try to query a list - same without API version
- azure_rm_resource_facts:
- resource_group: "{{ resource_group }}"
- provider: network
- resource_type: networksecuritygroups
- register: output
-- name: Assert value was returned
- assert:
- that:
- - not output.changed
- - output.response[0]['name'] != None
- - output.response | length >= 1
-
-- name: Query all the resources in the resource group
- azure_rm_resource_facts:
- resource_group: "{{ resource_group }}"
- resource_type: resources
- register: output
-- name: Assert value was returned
- assert:
- that:
- - not output.changed
- - output.response | length >= 1
-
-- name: Create storage account that requires LRO polling
- azure_rm_resource:
- polling_timeout: 600
- polling_interval: 60
- api_version: '2018-07-01'
- resource_group: "{{ resource_group }}"
- provider: Storage
- resource_type: storageAccounts
- resource_name: "{{ storageaccountname }}"
- body:
- sku:
- name: Standard_GRS
- kind: Storage
- location: eastus
- register: output
-
-- name: Assert that storage was successfully created
- assert:
- that: "output['response']['name'] == '{{ storageaccountname }}'"
-
-
-- name: Try to get storage keys -- special case where the subresource has no name
- azure_rm_resource:
- resource_group: "{{ resource_group }}"
- provider: storage
- resource_type: storageAccounts
- resource_name: "{{ storageaccountname }}"
- subresource:
- - type: listkeys
- api_version: '2018-03-01-preview'
- method: POST
- register: keys
-
-- name: Assert that key was returned
- assert:
- that: keys['response']['keys'][0]['value'] | length > 0
-
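The listkeys call above registers the raw REST response under `keys`. A minimal follow-on sketch, assuming the response shape asserted above, shows how a playbook could stash the primary key for later use; the `storage_primary_key` fact name is illustrative:

- name: Stash the primary storage account key from the listkeys response (illustrative)
  set_fact:
    storage_primary_key: "{{ keys['response']['keys'][0]['value'] }}"
  no_log: true   # keep the secret out of task output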
-- name: Delete storage - without API version
- azure_rm_resource:
- polling_timeout: 600
- polling_interval: 60
- method: DELETE
- resource_group: "{{ resource_group }}"
- provider: Storage
- resource_type: storageAccounts
- resource_name: "{{ storageaccountname }}"
diff --git a/test/integration/targets/azure_rm_resourcegroup/aliases b/test/integration/targets/azure_rm_resourcegroup/aliases
deleted file mode 100644
index 0dbbfcebab..0000000000
--- a/test/integration/targets/azure_rm_resourcegroup/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group3
-destructive
-azure_rm_resourcegroup_info
diff --git a/test/integration/targets/azure_rm_resourcegroup/meta/main.yml b/test/integration/targets/azure_rm_resourcegroup/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_resourcegroup/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_resourcegroup/tasks/main.yml b/test/integration/targets/azure_rm_resourcegroup/tasks/main.yml
deleted file mode 100644
index 238509695f..0000000000
--- a/test/integration/targets/azure_rm_resourcegroup/tasks/main.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-- name: Get resource group info
- azure_rm_resourcegroup_info:
- register: rg
-
-- assert:
- that:
- - rg.resourcegroups | length >= 1
-
-- name: Get resource group info
- azure_rm_resourcegroup_info:
- name: "{{ resource_group }}"
- list_resources: yes
- register: rg
-
-- assert:
- that:
- - rg.resourcegroups | length == 1
- - rg.resourcegroups[0].resources | length >= 0
-
-- name: Create resource group (idempotent)
- azure_rm_resourcegroup:
- name: "{{ resource_group }}"
- location: "{{ rg.resourcegroups[0].location }}"
- register: output
-
-- assert:
- that:
- - not output.changed
-
-- name: delete resource group
- azure_rm_resourcegroup:
- name: "{{ resource_group }}"
- state: absent
- check_mode: yes
- register: output
-
-- assert:
- that:
- - output.changed \ No newline at end of file
diff --git a/test/integration/targets/azure_rm_roledefinition/aliases b/test/integration/targets/azure_rm_roledefinition/aliases
deleted file mode 100644
index 35b9401151..0000000000
--- a/test/integration/targets/azure_rm_roledefinition/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/azure
-destructive
-unsupported \ No newline at end of file
diff --git a/test/integration/targets/azure_rm_roledefinition/meta/main.yml b/test/integration/targets/azure_rm_roledefinition/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_roledefinition/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_roledefinition/tasks/main.yml b/test/integration/targets/azure_rm_roledefinition/tasks/main.yml
deleted file mode 100644
index f024dd5a49..0000000000
--- a/test/integration/targets/azure_rm_roledefinition/tasks/main.yml
+++ /dev/null
@@ -1,207 +0,0 @@
-- name: Fix resource prefix
- set_fact:
- role_name: "{{ (resource_group | replace('-','x'))[-8:] }}{{ 1000 | random }}testrole"
- subscription_id: "{{ lookup('env','AZURE_SUBSCRIPTION_ID') }}"
- principal_id: "{{ lookup('env','AZURE_CLIENT_ID') }}"
- run_once: yes
-
-- name: Create a role definition (Check Mode)
- azure_rm_roledefinition:
- name: "{{ role_name }}"
- scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}"
- permissions:
- - actions:
- - "Microsoft.Compute/virtualMachines/read"
- not_actions:
- - "Microsoft.Compute/virtualMachines/write"
- data_actions:
- - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read"
- not_data_actions:
- - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write"
- assignable_scopes:
- - "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}"
- check_mode: yes
- register: output
-
-- name: Assert creating role definition check mode
- assert:
- that:
- - output.changed
-
-- name: Create a role definition
- azure_rm_roledefinition:
- name: "{{ role_name }}"
- scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}"
- permissions:
- - actions:
- - "Microsoft.Compute/virtualMachines/read"
- not_actions:
- - "Microsoft.Compute/virtualMachines/write"
- data_actions:
- - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read"
- not_data_actions:
- - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write"
- assignable_scopes:
- - "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}"
- register: output
-
-- name: Assert creating role definition
- assert:
- that:
- - output.changed
-
-- name: Get facts by type
- azure_rm_roledefinition_facts:
- scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}"
- type: custom
- register: facts
-
-- name: Assert facts
- assert:
- that:
- - facts['roledefinitions'] | length > 1
-
-- name: Get facts by name
- azure_rm_roledefinition_facts:
- scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}"
- role_name: "{{ role_name }}"
- register: facts
- until: facts.roledefinitions | length > 0
- retries: 50
- delay: 60
-
-- name: Assert facts
- assert:
- that:
- - facts['roledefinitions'] | length == 1
- - facts['roledefinitions'][0]['permissions'] | length == 1
- - facts['roledefinitions'][0]['permissions'][0]['not_data_actions'] | length == 1
- - facts['roledefinitions'][0]['permissions'][0]['data_actions'] | length == 1
-
-- name: Update the role definition (idempotent)
- azure_rm_roledefinition:
- name: "{{ role_name }}"
- scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}"
- permissions:
- - actions:
- - "Microsoft.Compute/virtualMachines/read"
- not_actions:
- - "Microsoft.Compute/virtualMachines/write"
- data_actions:
- - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read"
- not_data_actions:
- - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write"
- assignable_scopes:
- - "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}"
- register: output
-
-- name: assert output not changed
- assert:
- that:
- - not output.changed
-
-- name: Update the role definition
- azure_rm_roledefinition:
- name: "{{ role_name }}"
- scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}"
- permissions:
- - actions:
- - "Microsoft.Compute/virtualMachines/read"
- - "Microsoft.Compute/virtualMachines/start/action"
- not_actions:
- - "Microsoft.Compute/virtualMachines/write"
- data_actions:
- - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read"
- not_data_actions:
- - "Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write"
- assignable_scopes:
- - "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}"
- register: output
-
-- name: assert output changed
- assert:
- that:
- - output.changed
-
-- name: Get role definition facts
- azure_rm_roledefinition_facts:
- role_name: "{{ role_name }}"
- scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}"
- type: custom
- register: roledef
- until: "{{ roledef.roledefinitions | length > 0 }}"
- retries: 50
- delay: 60
-
-- name: Assert role definition facts
- assert:
- that:
- - roledef['roledefinitions'] | length == 1
- - roledef['roledefinitions'][0]['id']
-
-- name: Create a role assignment (Check Mode)
- azure_rm_roleassignment:
- scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}"
- assignee_object_id: "{{ principal_id }}"
- role_definition_id: "{{ roledef['roledefinitions'][0]['id'] }}"
- check_mode: yes
- register: output
-
-- name: Assert creating role assignment check mode
- assert:
- that:
- - output.changed
-
-- name: Create a role assignment
- azure_rm_roleassignment:
- scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}"
- assignee_object_id: "{{ principal_id }}"
- role_definition_id: "{{ roledef['roledefinitions'][0]['id'] }}"
- register: output
-
-- name: Assert creating role assignment
- assert:
- that:
- - output.changed
-
-- name: Get facts
- azure_rm_roleassignment_facts:
- scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}"
- assignee: "{{ principal_id }}"
- role_definition_id: "{{ roledef['roledefinitions'][0]['id'] }}"
- register: facts
-
-- name: assert role assignment facts
- assert:
- that:
- - facts['roleassignments'] | length > 0
- - facts['roleassignments'][0]['id']
-
-- name: delete role assignment
- azure_rm_roleassignment:
- name: "{{ facts['roleassignments'][0]['id'].split('/')[-1] }}"
- scope: "/subscriptions/{{ subscription_id }}"
- state: absent
-
-- name: Delete the role definition (Check Mode)
- azure_rm_roledefinition:
- name: "{{ role_name }}"
- scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}"
- state: absent
- check_mode: yes
- register: output
-
-- name: assert deleting role definition check mode
- assert:
- that: output.changed
-
-- name: Delete the role definition
- azure_rm_roledefinition:
- name: "{{ role_name }}"
- scope: "/subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}"
- state: absent
- register: output
-
-- assert:
- that:
- - output.changed
diff --git a/test/integration/targets/azure_rm_routetable/aliases b/test/integration/targets/azure_rm_routetable/aliases
deleted file mode 100644
index 759eafa2de..0000000000
--- a/test/integration/targets/azure_rm_routetable/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/azure
-shippable/azure/group3
-destructive
diff --git a/test/integration/targets/azure_rm_routetable/meta/main.yml b/test/integration/targets/azure_rm_routetable/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_routetable/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_routetable/tasks/main.yml b/test/integration/targets/azure_rm_routetable/tasks/main.yml
deleted file mode 100644
index 40c4159eac..0000000000
--- a/test/integration/targets/azure_rm_routetable/tasks/main.yml
+++ /dev/null
@@ -1,183 +0,0 @@
-- name: Prepare random number
- set_fact:
- name: "table{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- route_name: "route{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-- name: Create a route table (check mode)
- azure_rm_routetable:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- tags:
- purpose: testing
- check_mode: yes
- register: output
-
-- assert:
- that:
- - not output.id
- - output.changed
-
-- name: Create a route table
- azure_rm_routetable:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- tags:
- purpose: testing
- register: output
-
-- assert:
- that:
- - output.changed
- - output.id
-
-- name: Create a route table (idempotent)
- azure_rm_routetable:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- tags:
- purpose: testing
- register: output
-
-- assert:
- that:
- - not output.changed
-
-- name: Get facts of the table
- azure_rm_routetable_facts:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- register: output
-
-- assert:
- that:
- - "output.route_tables | length == 1"
- - "output.route_tables[0].routes | length == 0"
-
-- name: Create route (check mode)
- azure_rm_route:
- name: "{{ route_name }}"
- resource_group: "{{ resource_group }}"
- next_hop_type: virtual_network_gateway
- address_prefix: "10.1.0.0/16"
- route_table_name: "{{ name }}"
- check_mode: yes
- register: output
-
-- assert:
- that:
- - output.changed
- - not output.id
-
-- name: Create route
- azure_rm_route:
- name: "{{ route_name }}"
- resource_group: "{{ resource_group }}"
- next_hop_type: virtual_network_gateway
- address_prefix: "10.1.0.0/16"
- route_table_name: "{{ name }}"
- register: output
-
-- assert:
- that:
- - output.changed
- - output.id
-
-- name: Create route (idempotent)
- azure_rm_route:
- name: "{{ route_name }}"
- resource_group: "{{ resource_group }}"
- next_hop_type: virtual_network_gateway
- address_prefix: "10.1.0.0/16"
- route_table_name: "{{ name }}"
- register: output
-
-- assert:
- that:
- - not output.changed
-
-- name: update route
- azure_rm_route:
- name: "{{ route_name }}"
- resource_group: "{{ resource_group }}"
- next_hop_type: virtual_network_gateway
- address_prefix: "10.1.0.0/24"
- route_table_name: "{{ name }}"
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Get facts of the table
- azure_rm_routetable_facts:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- register: output
-
-- assert:
- that:
- - "output.route_tables | length == 1"
- - "output.route_tables[0].routes | length == 1"
- - output.route_tables[0].routes[0].address_prefix == '10.1.0.0/24'
-
-- name: Delete route (check mode)
- azure_rm_route:
- name: "{{ route_name }}"
- resource_group: "{{ resource_group }}"
- route_table_name: "{{ name }}"
- state: absent
- check_mode: yes
-
-- name: Delete route
- azure_rm_route:
- name: "{{ route_name }}"
- resource_group: "{{ resource_group }}"
- state: absent
- route_table_name: "{{ name }}"
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Delete route (idempotent)
- azure_rm_route:
- name: "{{ route_name }}"
- resource_group: "{{ resource_group }}"
- state: absent
- route_table_name: "{{ name }}"
- register: output
-
-- assert:
- that:
- - not output.changed
-
-- name: Delete route table (check mode)
- azure_rm_routetable:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- state: absent
- check_mode: yes
-
-- name: Delete route table
- azure_rm_routetable:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- state: absent
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Delete route table (idempotent)
- azure_rm_routetable:
- name: "{{ name }}"
- resource_group: "{{ resource_group }}"
- state: absent
- register: output
-
-- assert:
- that:
- - not output.changed
diff --git a/test/integration/targets/azure_rm_securitygroup/aliases b/test/integration/targets/azure_rm_securitygroup/aliases
deleted file mode 100644
index 4f09ea58b8..0000000000
--- a/test/integration/targets/azure_rm_securitygroup/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group2
-destructive
-azure_rm_securitygroup_facts
diff --git a/test/integration/targets/azure_rm_securitygroup/meta/main.yml b/test/integration/targets/azure_rm_securitygroup/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_securitygroup/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_securitygroup/tasks/main.yml b/test/integration/targets/azure_rm_securitygroup/tasks/main.yml
deleted file mode 100644
index 658c1c9a98..0000000000
--- a/test/integration/targets/azure_rm_securitygroup/tasks/main.yml
+++ /dev/null
@@ -1,302 +0,0 @@
-- name: Prepare random number
- set_fact:
- secgroupname: "sg{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- asg_name1: "asg1{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- asg_name2: "asg2{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- sg_name1: "sgasg{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-
-- name: Create security group
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ secgroupname }}"
- tags:
- testing: testing
- delete: on-exit
- foo: bar
- purge_rules: yes
- rules:
- - name: DenySSH
- protocol: Tcp
- destination_port_range: 22
- access: Deny
- priority: 100
- direction: Inbound
- - name: AllowSSH
- protocol: Tcp
- source_address_prefix: 174.109.158.0/24
- destination_port_range: 22
- access: Allow
- priority: 101
- direction: Inbound
- register: output
-
-- assert: { that: "{{ output.state.rules | length }} == 2" }
-
-- name: Gather facts by tags
- azure_rm_securitygroup_info:
- resource_group: "{{ resource_group }}"
- tags:
- - testing
- - foo:bar
- register: output
-
-- assert:
- that: output.securitygroups | length == 1
-
-- name: Add/Update rules on existing security group
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ secgroupname }}"
- rules:
- - name: AllowSSH
- protocol: Tcp
- source_address_prefix: 174.108.158.0/24
- destination_port_range: 22
- access: Allow
- priority: 101
- - name: AllowSSHFromHome
- protocol: Tcp
- source_address_prefix: 174.109.158.0/24
- destination_port_range: 22-23
- priority: 102
- register: output
-
-- assert:
- that:
- - "{{ output.state.rules | length }} == 3"
- - output.state.rules[0].source_address_prefix == '174.108.158.0/24'
-
-- name: Test idempotence
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ secgroupname }}"
- rules:
- - name: AllowSSH
- protocol: Tcp
- source_address_prefix: 174.108.158.0/24
- destination_port_range: 22
- access: Allow
- priority: 101
- - name: AllowSSHFromHome
- protocol: Tcp
- source_address_prefix: 174.109.158.0/24
- destination_port_range: 22-23
- priority: 102
- register: output
-
-- assert:
- that: not output.changed
-
-- name: Update tags
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ secgroupname }}"
- tags:
- testing: testing
- delete: never
- baz: bar
- append_tags: false
- register: output
-
-- assert:
- that:
- - output.state.tags | length == 3
- - output.state.tags.delete == 'never'
-
-- name: Purge tags
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ secgroupname }}"
- append_tags: false
- tags:
- testing: testing
- delete: on-exit
- register: output
-
-- assert:
- that:
- - output.state.tags | length == 2
- - output.state.tags.delete == 'on-exit'
-
-- name: Gather facts for one security group
- azure_rm_securitygroup_info:
- resource_group: "{{ resource_group }}"
- name: "{{ secgroupname }}"
- register: output
-
-- assert:
- that:
- - output.securitygroups | length == 1
-
-- name: Gather facts for all security groups
- azure_rm_securitygroup_info:
- resource_group: "{{ resource_group }}"
- register: output_groups
-
-- assert:
- that:
- - output_groups.securitygroups | length > 0
-
-- name: Create security group with source_address_prefixes
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ secgroupname }}"
- tags:
- testing: testing
- delete: on-exit
- foo: bar
- purge_rules: yes
- rules:
- - name: AllowSSH
- protocol: Tcp
- source_address_prefix:
- - 52.100.120.240
- - 53.100.250.190
- - 54.110.200.200
- destination_port_range: 22
- access: Allow
- priority: 101
- direction: Inbound
- register: output
-
-- assert:
- that:
- - "{{ output.state.rules | length }} == 1"
- - "{{ output.state.rules[0].source_address_prefixes | length }} == 3"
- - not output.state.rules[0].source_address_prefix
-
-- name: Create security group with source_address_prefixes (idempotent)
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ secgroupname }}"
- tags:
- testing: testing
- delete: on-exit
- foo: bar
- purge_rules: yes
- rules:
- - name: AllowSSH
- protocol: Tcp
- source_address_prefix:
- - 52.100.120.240
- - 53.100.250.190
- - 54.110.200.200
- destination_port_range: 22
- access: Allow
- priority: 101
- direction: Inbound
- register: output
-
-- assert:
- that: not output.changed
-
-- name: Add a rule with a single source address prefix
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ secgroupname }}"
- tags:
- testing: testing
- delete: on-exit
- foo: bar
- rules:
- - name: DenySSH
- protocol: Tcp
- source_address_prefix:
- - 54.120.120.240
- destination_port_range: 22
- access: Deny
- priority: 102
- direction: Inbound
- register: output
-
-- assert:
- that:
- - output.changed
- - "{{ output.state.rules | length }} == 2"
-
-- name: Create Application security group 1
- azure_rm_applicationsecuritygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ asg_name1 }}"
- tags:
- testing: testing
- register: asg1
-
-- name: Create Application security group 2
- azure_rm_applicationsecuritygroup:
- resource_group: "{{ resource_group_secondary }}"
- name: "{{ asg_name2 }}"
- tags:
- testing: testing
- register: asg2
-
-- name: Create security group with application security group
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ sg_name1 }}"
- purge_rules: yes
- rules:
- - name: AsgToAsg
- protocol: Tcp
- source_application_security_groups:
- - "{{ asg1.id }}"
- destination_application_security_groups:
- - resource_group: "{{ resource_group_secondary }}"
- name: "{{ asg_name2 }}"
- destination_port_range: 22
- access: Allow
- priority: 101
- direction: Inbound
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Create security group with application security group - Idempotent
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ sg_name1 }}"
- purge_rules: yes
- rules:
- - name: AsgToAsg
- protocol: Tcp
- source_application_security_groups:
- - "{{ asg_name1 }}"
- destination_application_security_groups:
- - resource_group: "{{ resource_group_secondary }}"
- name: "{{ asg_name2 }}"
- destination_port_range: 22
- access: Allow
- priority: 101
- direction: Inbound
- register: output
-
-- assert:
- that:
- - not output.changed
-
-
-- name: Delete security group
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ sg_name1 }}"
- state: absent
-
-- name: Delete all security groups
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ item.name }}"
- state: absent
- with_items: "{{ output_groups.securitygroups }}"
-
-- name: Should have no security groups remaining
- azure_rm_securitygroup_info:
- resource_group: "{{ resource_group }}"
- register: output
-
-- assert:
- that:
- - output.securitygroups | length == 0
diff --git a/test/integration/targets/azure_rm_servicebus/aliases b/test/integration/targets/azure_rm_servicebus/aliases
deleted file mode 100644
index 239e365779..0000000000
--- a/test/integration/targets/azure_rm_servicebus/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/azure
-shippable/azure/group4
-destructive
diff --git a/test/integration/targets/azure_rm_servicebus/meta/main.yml b/test/integration/targets/azure_rm_servicebus/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_servicebus/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_servicebus/tasks/main.yml b/test/integration/targets/azure_rm_servicebus/tasks/main.yml
deleted file mode 100644
index c12d635a1d..0000000000
--- a/test/integration/targets/azure_rm_servicebus/tasks/main.yml
+++ /dev/null
@@ -1,169 +0,0 @@
-- name: Prepare random number
- set_fact:
- rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-- name: Create a namespace
- azure_rm_servicebus:
- name: "ns{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- register: namespace
-
-- assert:
- that:
- - namespace.id
- - namespace.changed
-
-- name: Create a namespace (idempotent)
- azure_rm_servicebus:
- name: "ns{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- register: namespace
-
-- assert:
- that:
- - not namespace.changed
-
-- name: Create a queue
- azure_rm_servicebusqueue:
- name: "queue{{ rpfx }}"
- namespace: "ns{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- register: queue
-
-- assert:
- that:
- - queue.id
- - queue.changed
-
-- name: Create a topic (check mode)
- azure_rm_servicebustopic:
- name: "topic{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- namespace: "ns{{ rpfx }}"
- duplicate_detection_time_in_seconds: 600
- check_mode: yes
- register: output
-
-- assert:
- that:
- - output.changed
-
-- name: Create a topic
- azure_rm_servicebustopic:
- name: "topic{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- namespace: "ns{{ rpfx }}"
- duplicate_detection_time_in_seconds: 600
- register: output
-
-- assert:
- that:
- - output.changed
- - output.id
- - "'subscription_count' not in output"
-
-- name: Create a topic (idempotent)
- azure_rm_servicebustopic:
- name: "topic{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- namespace: "ns{{ rpfx }}"
- duplicate_detection_time_in_seconds: 600
- register: output
-
-- assert:
- that:
- - not output.changed
-
-- name: Create test policy
- azure_rm_servicebussaspolicy:
- name: testpolicy
- resource_group: "{{ resource_group }}"
- namespace: "ns{{ rpfx }}"
- topic: "topic{{ rpfx }}"
- rights: manage
-
-- name: Create a subscription
- azure_rm_servicebustopicsubscription:
- name: "subs{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- namespace: "ns{{ rpfx }}"
- topic: "topic{{ rpfx }}"
- register: subs
-
-- assert:
- that:
- - subs.id
- - subs.changed
-
-- name: Retrieve topic
- azure_rm_servicebus_facts:
- type: topic
- name: "topic{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- namespace: "ns{{ rpfx }}"
- show_sas_policies: yes
- register: facts
-
-- assert:
- that:
- - "facts.servicebuses | length == 1"
- - facts.servicebuses[0].id == output.id
- - facts.servicebuses[0].subscription_count == 1
- - facts.servicebuses[0].sas_policies.testpolicy
- - facts.servicebuses[0].sas_policies.testpolicy.rights == 'manage'
-
-- name: Delete subscription
- azure_rm_servicebustopicsubscription:
- name: "subs{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- namespace: "ns{{ rpfx }}"
- topic: "topic{{ rpfx }}"
- state: absent
-
-- name: Retrieve topic
- azure_rm_servicebus_facts:
- type: topic
- name: "topic{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- namespace: "ns{{ rpfx }}"
- show_sas_policies: yes
- register: facts
-
-- assert:
- that:
- - facts.servicebuses[0].subscription_count == 0
- - "facts.servicebuses | length == 1"
-
-- name: Delete topic
- azure_rm_servicebustopic:
- name: "topic{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- namespace: "ns{{ rpfx }}"
- state: absent
-
-- name: Retrieve topic
- azure_rm_servicebus_facts:
- name: "topic{{ rpfx }}"
- type: topic
- resource_group: "{{ resource_group }}"
- namespace: "ns{{ rpfx }}"
- show_sas_policies: yes
- register: facts
-
-- assert:
- that:
- - "facts.servicebuses | length == 0"
-
-- name: Delete queue
- azure_rm_servicebusqueue:
- name: "queue{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- namespace: "ns{{ rpfx }}"
- state: absent
-
-- name: Delete namespace
- azure_rm_servicebus:
- name: "ns{{ rpfx }}"
- resource_group: "{{ resource_group }}"
- state: absent \ No newline at end of file
diff --git a/test/integration/targets/azure_rm_sqlserver/aliases b/test/integration/targets/azure_rm_sqlserver/aliases
deleted file mode 100644
index 952e4dac80..0000000000
--- a/test/integration/targets/azure_rm_sqlserver/aliases
+++ /dev/null
@@ -1,8 +0,0 @@
-cloud/azure
-destructive
-shippable/azure/group9
-azure_rm_sqlserver_facts
-azure_rm_sqldatabase
-azure_rm_sqldatabase_facts
-azure_rm_sqlfirewallrule
-azure_rm_sqlfirewallrule_facts
diff --git a/test/integration/targets/azure_rm_sqlserver/meta/main.yml b/test/integration/targets/azure_rm_sqlserver/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_sqlserver/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_sqlserver/tasks/main.yml b/test/integration/targets/azure_rm_sqlserver/tasks/main.yml
deleted file mode 100644
index a85e25efb1..0000000000
--- a/test/integration/targets/azure_rm_sqlserver/tasks/main.yml
+++ /dev/null
@@ -1,419 +0,0 @@
-- name: Prepare random number
- set_fact:
- random_postfix: "{{ 1000 | random }}{{ resource_group | hash('md5') | truncate(7, True, '') }}"
- run_once: yes
-
-- name: Create instance of SQL Server -- check mode
- azure_rm_sqlserver:
- resource_group: "{{ resource_group }}"
- name: "sqlsrv{{ random_postfix }}"
- location: eastus
- admin_username: mylogin
- admin_password: Testpasswordxyz12!
- check_mode: yes
- register: output
-- name: Assert the resource instance would be created
- assert:
- that:
- - output.changed
-
-- name: Create instance of SQL Server
- azure_rm_sqlserver:
- resource_group: "{{ resource_group }}"
- name: "sqlsrv{{ random_postfix }}"
- location: eastus
- admin_username: mylogin
- admin_password: Testpasswordxyz12!
- tags:
- aaa: bbb
- register: output
-- name: Assert the resource instance was created successfully
- assert:
- that:
- - output.changed
- - output.state == 'Ready'
-
-- name: Create instance of SQL Server again
- azure_rm_sqlserver:
- resource_group: "{{ resource_group }}"
- name: "sqlsrv{{ random_postfix }}"
- location: eastus
- admin_username: mylogin
- admin_password: Testpasswordxyz12!
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
- - output.state == 'Ready'
-
-# azure_rm_sqlserver_facts tests
-
-- name: Gather facts SQL Server
- azure_rm_sqlserver_facts:
- resource_group: "{{ resource_group }}"
- server_name: "sqlsrv{{ random_postfix }}"
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.servers.sqlsrv{{ random_postfix }}.id != None
- - output.servers.sqlsrv{{ random_postfix }}.name == "sqlsrv{{ random_postfix }}"
- - output.servers.sqlsrv{{ random_postfix }}.type != None
- - output.servers.sqlsrv{{ random_postfix }}.location != None
- - output.servers.sqlsrv{{ random_postfix }}.kind != None
- - output.servers.sqlsrv{{ random_postfix }}.version != None
- - output.servers.sqlsrv{{ random_postfix }}.state != None
- - output.servers.sqlsrv{{ random_postfix }}.fully_qualified_domain_name != None
- - output.servers.sqlsrv{{ random_postfix }}.tags.aaa == 'bbb'
-
-- name: Gather facts SQL Server - nonexistent server
- azure_rm_sqlserver_facts:
- resource_group: "{{ resource_group }}"
- server_name: "unexisting"
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.servers == {}
-
-- name: Gather facts SQL Server - list
- azure_rm_sqlserver_facts:
- resource_group: "{{ resource_group }}"
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.servers.sqlsrv{{ random_postfix }}.id != None
- - output.servers.sqlsrv{{ random_postfix }}.name == "sqlsrv{{ random_postfix }}"
- - output.servers.sqlsrv{{ random_postfix }}.type != None
- - output.servers.sqlsrv{{ random_postfix }}.location != None
- - output.servers.sqlsrv{{ random_postfix }}.kind != None
- - output.servers.sqlsrv{{ random_postfix }}.version != None
- - output.servers.sqlsrv{{ random_postfix }}.state != None
- - output.servers.sqlsrv{{ random_postfix }}.fully_qualified_domain_name != None
-
-# azure_rm_sqldatabase tests
-
-- name: Create instance of SQL Database -- check mode
- azure_rm_sqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- name: database{{ random_postfix }}
- location: eastus
- check_mode: yes
- register: output
-- name: Assert the resource instance would be created
- assert:
- that:
- - output.changed
-
-- name: Create instance of SQL Database
- azure_rm_sqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- name: database{{ random_postfix }}
- location: eastus
- tags:
- aaa: bbb
- register: output
-- name: Assert the resource instance was created successfully
- assert:
- that:
- - output.changed
- - output.status == 'Online'
-
-- name: Create instance of SQL Database again
- azure_rm_sqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- name: database{{ random_postfix }}
- location: eastus
- tags:
- aaa: bbb
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
- - output.status == 'Online'
-
-# test database facts:
-- name: Create second SQL Database
- azure_rm_sqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- name: database{{ random_postfix }}second
- location: eastus
-
-- name: Gather facts SQL Database
- azure_rm_sqldatabase_facts:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- name: database{{ random_postfix }}
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.databases[0].id != None
- - output.databases[0].name != None
- - output.databases[0].location != None
- - output.databases[0].sku.name != None
- - output.databases[0].sku.tier != None
- - output.databases[0].sku.capacity != None
- - output.databases[0].kind != None
- - output.databases[0].status != None
-
-- name: Gather facts SQL Database
- azure_rm_sqldatabase_facts:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.databases[0].id != None
- - output.databases[0].name != None
- - output.databases[0].location != None
- - output.databases[0].sku.name != None
- - output.databases[0].sku.tier != None
- - output.databases[0].sku.capacity != None
- - output.databases[0].kind != None
- - output.databases[0].status != None
- - output.databases[1].id != None
- - output.databases[1].name != None
- - output.databases[1].location != None
- - output.databases[1].sku.name != None
- - output.databases[1].sku.tier != None
- - output.databases[1].sku.capacity != None
- - output.databases[1].kind != None
- - output.databases[1].status != None
-
-- name: Delete instance of secondary database
- azure_rm_sqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- name: database{{ random_postfix }}second
- state: absent
-
-# clean up databases
-- name: Delete instance of SQL Database -- check mode
- azure_rm_sqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- name: database{{ random_postfix }}
- state: absent
- check_mode: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete instance of SQL Database
- azure_rm_sqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- name: database{{ random_postfix }}
- state: absent
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete nonexistent instance of SQL Database
- azure_rm_sqldatabase:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- name: database{{ random_postfix }}
- state: absent
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed == false
-
-# test database facts without databases
-- name: Gather facts SQL Database
- azure_rm_sqldatabase_facts:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- name: database{{ random_postfix }}
- register: output
-- name: Assert that an empty list was returned
- assert:
- that:
- - output.changed == False
- - output.databases | length == 0
-
-- name: Gather facts SQL Database
- azure_rm_sqldatabase_facts:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- register: output
-- name: Assert that only the default database was returned
- assert:
- that:
- - output.changed == False
- - output.databases | length == 1
-
-# azure_rm_sqlfirewallrule tests
-
-- name: Create instance of Firewall Rule -- check mode
- azure_rm_sqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: "sqlsrv{{ random_postfix }}"
- name: firewallrule{{ random_postfix }}
- start_ip_address: 172.28.10.136
- end_ip_address: 172.28.10.138
- check_mode: yes
- register: output
-- name: Assert the resource instance would be created
- assert:
- that:
- - output.changed
-
-- name: Create instance of Firewall Rule
- azure_rm_sqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: "sqlsrv{{ random_postfix }}"
- name: firewallrule{{ random_postfix }}
- start_ip_address: 172.28.10.136
- end_ip_address: 172.28.10.138
- register: output
-- name: Assert the resource instance was created successfully
- assert:
- that:
- - output.changed
-
-- name: Create instance of Firewall Rule again
- azure_rm_sqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: "sqlsrv{{ random_postfix }}"
- name: firewallrule{{ random_postfix }}
- start_ip_address: 172.28.10.136
- end_ip_address: 172.28.10.138
- register: output
-- name: Assert the state has not changed
- assert:
- that:
- - output.changed == false
-
-#
-# azure_rm_sqlserverfirewallrule_facts
-#
-
-- name: Create Firewall Rule - second
- azure_rm_sqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- name: firewallrule{{ random_postfix }}second
- start_ip_address: 172.28.10.136
- end_ip_address: 172.28.10.138
-
-- name: Gather facts SQL Firewall Rule
- azure_rm_sqlfirewallrule_facts:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- name: firewallrule{{ random_postfix }}
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.rules[0].id != None
- - output.rules[0].resource_group != None
- - output.rules[0].server_name != None
- - output.rules[0].name != None
- - output.rules[0].start_ip_address != None
- - output.rules[0].end_ip_address != None
-
-- name: Gather facts SQL Firewall Rule
- azure_rm_sqlfirewallrule_facts:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- register: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.rules[0].id != None
- - output.rules[0].id != None
- - output.rules[0].resource_group != None
- - output.rules[0].server_name != None
- - output.rules[0].name != None
- - output.rules[0].start_ip_address != None
- - output.rules[0].end_ip_address != None
- - output.rules[1].id != None
- - output.rules[1].resource_group != None
- - output.rules[1].server_name != None
- - output.rules[1].name != None
- - output.rules[1].start_ip_address != None
- - output.rules[1].end_ip_address != None
-
-- name: Delete instance of Firewall Rule
- azure_rm_sqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- name: firewallrule{{ random_postfix }}
- state: absent
-- name: Delete instance of Firewall Rule
- azure_rm_sqlfirewallrule:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- name: firewallrule{{ random_postfix }}second
- state: absent
-
-- name: Gather facts SQL Firewall Rule
- azure_rm_sqlfirewallrule_facts:
- resource_group: "{{ resource_group }}"
- server_name: sqlsrv{{ random_postfix }}
- name: firewallrule{{ random_postfix }}
- register: output
-- name: Assert that an empty list was returned
- assert:
- that:
- - output.changed == False
- - output.rules | length == 0
-
-# finalise & clean up azure_rm_sqlserver test
-
-- name: Delete instance of SQL Server -- check mode
- azure_rm_sqlserver:
- resource_group: "{{ resource_group }}"
- name: "sqlsrv{{ random_postfix }}"
- state: absent
- check_mode: yes
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete instance of SQL Server
- azure_rm_sqlserver:
- resource_group: "{{ resource_group }}"
- name: "sqlsrv{{ random_postfix }}"
- state: absent
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed
-
-- name: Delete nonexistent instance of SQL Server
- azure_rm_sqlserver:
- resource_group: "{{ resource_group }}"
- name: "sqlsrv{{ random_postfix }}"
- state: absent
- register: output
-- name: Assert the state has changed
- assert:
- that:
- - output.changed == false
diff --git a/test/integration/targets/azure_rm_storageaccount/aliases b/test/integration/targets/azure_rm_storageaccount/aliases
deleted file mode 100644
index cbb31e489d..0000000000
--- a/test/integration/targets/azure_rm_storageaccount/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group2
-destructive
-disabled
diff --git a/test/integration/targets/azure_rm_storageaccount/meta/main.yml b/test/integration/targets/azure_rm_storageaccount/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_storageaccount/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_storageaccount/tasks/main.yml b/test/integration/targets/azure_rm_storageaccount/tasks/main.yml
deleted file mode 100644
index 8713dd2c31..0000000000
--- a/test/integration/targets/azure_rm_storageaccount/tasks/main.yml
+++ /dev/null
@@ -1,158 +0,0 @@
- - name: Create storage account name
- set_fact:
- storage_account: "{{ resource_group | hash('md5') | truncate(24, True, '') }}"
-
- - name: Test invalid account name
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "invalid_char$"
- register: invalid_name
- ignore_errors: yes
-
- - name: Assert task failed
- assert: { that: "invalid_name['failed'] == True" }
-
- - name: Delete storage account
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account }}"
- state: absent
- force_delete_nonempty: True
-
- - name: Create new storage account
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account }}"
- account_type: Standard_LRS
- append_tags: no
- blob_cors:
- - allowed_origins:
- - http://www.example.com/
- allowed_methods:
- - GET
- - POST
- allowed_headers:
- - x-ms-meta-data*
- - x-ms-meta-target*
- - x-ms-meta-abc
- exposed_headers:
- - x-ms-meta-*
- max_age_in_seconds: 200
- tags:
- test: test
- galaxy: galaxy
- register: output
-
- - name: Assert status succeeded and results include an Id value
- assert:
- that:
- - output.changed
- - output.state.id is defined
- - output.state.blob_cors | length == 1
-
- - name: Create new storage account (idempotence)
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account }}"
- account_type: Standard_LRS
- append_tags: no
- blob_cors:
- - allowed_origins:
- - http://www.example.com/
- allowed_methods:
- - GET
- - POST
- allowed_headers:
- - x-ms-meta-data*
- - x-ms-meta-target*
- - x-ms-meta-abc
- exposed_headers:
- - x-ms-meta-*
- max_age_in_seconds: 200
- tags:
- test: test
- galaxy: galaxy
- register: output
-
- - assert:
- that:
- - not output.changed
-
- - name: Gather facts by tags
- azure_rm_storageaccount_facts:
- resource_group: "{{ resource_group }}"
- tags:
- - test
- - galaxy
-
- - assert:
- that: azure_storageaccounts | length >= 1
-
- - name: Change account type
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account }}"
- account_type: Premium_LRS
- register: change_account
- ignore_errors: yes
-
- - name: Assert account type change failed
- assert: { that: "change_account['failed'] == True" }
-
- - name: Change account type and add custom domain
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account }}"
- account_type: Standard_GRS
- custom_domain: { name: ansible.com, use_sub_domain: no }
- register: change_account
- ignore_errors: yes
-
- - name: Assert CNAME failure
- assert: { that: "'Azure Error: StorageCustomDomainNameNotValid' in change_account['msg']" }
-
- - name: Update account tags
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account }}"
- append_tags: no
- tags:
- testing: testing
- delete: never
- register: output
-
- - assert:
- that:
- - "output.state.tags | length == 2"
- - "output.state.tags.testing == 'testing'"
- - "output.state.tags.delete == 'never'"
-
- - name: Gather facts
- azure_rm_storageaccount_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account }}"
- show_connection_string: True
- show_blob_cors: True
-
- - assert:
- that:
- - "azure_storageaccounts| length == 1"
- - "storageaccounts | length == 1"
- - not storageaccounts[0].custom_domain
- - storageaccounts[0].account_type == "Standard_GRS"
- - storageaccounts[0].primary_endpoints.blob.connectionstring
- - storageaccounts[0].blob_cors
-
- - name: Gather facts
- azure_rm_storageaccount_facts:
- resource_group: "{{ resource_group }}"
-
- - assert:
- that:
- - "azure_storageaccounts | length > 0"
-
- - name: Delete account
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account }}"
- state: absent
diff --git a/test/integration/targets/azure_rm_storageblob/aliases b/test/integration/targets/azure_rm_storageblob/aliases
deleted file mode 100644
index e823954320..0000000000
--- a/test/integration/targets/azure_rm_storageblob/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group2
-destructive
-unstable
diff --git a/test/integration/targets/azure_rm_storageblob/files/Ratings.png b/test/integration/targets/azure_rm_storageblob/files/Ratings.png
deleted file mode 100644
index 8dd3e3dbc1..0000000000
--- a/test/integration/targets/azure_rm_storageblob/files/Ratings.png
+++ /dev/null
Binary files differ
diff --git a/test/integration/targets/azure_rm_storageblob/meta/main.yml b/test/integration/targets/azure_rm_storageblob/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_storageblob/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_storageblob/tasks/main.yml b/test/integration/targets/azure_rm_storageblob/tasks/main.yml
deleted file mode 100644
index 0bbc181fbb..0000000000
--- a/test/integration/targets/azure_rm_storageblob/tasks/main.yml
+++ /dev/null
@@ -1,119 +0,0 @@
-- name: Create storage account name
- set_fact:
- storage_account: "{{ resource_group | hash('md5') | truncate(24, True, '') }}"
-
-- name: Create storage account
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account }}"
- account_type: Standard_LRS
-
-- name: Create container
- azure_rm_storageblob:
- resource_group: "{{ resource_group }}"
- account_name: "{{ storage_account }}"
- container_name: my-blobs
-
-- name: Force upload blob
- azure_rm_storageblob:
- resource_group: "{{ resource_group }}"
- account_name: "{{ storage_account }}"
- container_name: my-blobs
- blob: 'Ratings.png'
- src: './targets/azure_rm_storageblob/files/Ratings.png'
- content_type: image/png
- tags:
- val1: foo
- val2: bar
- force: yes
-
-- name: Pause to work around storage blob timing issues
- wait_for:
- delay: 10
-
-- name: Upload blob idempotence
- azure_rm_storageblob:
- resource_group: "{{ resource_group }}"
- account_name: "{{ storage_account }}"
- container_name: my-blobs
- blob: 'Ratings.png'
- src: './targets/azure_rm_storageblob/files/Ratings.png'
- content_type: image/png
- tags:
- val1: foo
- val2: bar
- register: upload_facts
-
-- assert:
- that: "not upload_facts.changed"
-
-- name: Download file idempotence
- azure_rm_storageblob:
- resource_group: "{{ resource_group }}"
- account_name: "{{ storage_account }}"
- container_name: my-blobs
- blob: 'Ratings.png'
- dest: './targets/azure_rm_storageblob/files/Ratings.png'
- register: download_results
-
-- assert:
- that: not download_results.changed
-
-- file: path="/tmp/Ratings.png" state=absent
-
-- name: Download file
- azure_rm_storageblob:
- resource_group: "{{ resource_group }}"
- account_name: "{{ storage_account }}"
- container_name: my-blobs
- blob: 'Ratings.png'
- dest: '/tmp/Ratings.png'
- register: download_results
-
-- assert:
- that: "download_results.changed"
-
-- find: paths='/tmp' patterns="Ratings.png"
- register: find_results
-
-- assert: { that: "find_results['matched'] == 1" }
-
-- name: Do not delete container that has blobs
- azure_rm_storageblob:
- resource_group: "{{ resource_group }}"
- account_name: "{{ storage_account }}"
- container_name: my-blobs
- state: absent
- register: output
-
-- assert:
- that: "not output.changed"
-
-- name: Delete blob object
- azure_rm_storageblob:
- resource_group: "{{ resource_group }}"
- account_name: "{{ storage_account }}"
- container_name: my-blobs
- blob: "Ratings.png"
- state: absent
- register: output
-
-- assert:
- that: "output.changed"
-
-- name: Delete container
- azure_rm_storageblob:
- resource_group: "{{ resource_group }}"
- account_name: "{{ storage_account }}"
- container_name: my-blobs
- state: absent
- register: output
-
-- assert:
- that: "output.changed"
-
-- name: Delete storage account
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account }}"
- state: absent
diff --git a/test/integration/targets/azure_rm_subnet/aliases b/test/integration/targets/azure_rm_subnet/aliases
deleted file mode 100644
index aa77c071a8..0000000000
--- a/test/integration/targets/azure_rm_subnet/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/azure
-shippable/azure/group2
-destructive
diff --git a/test/integration/targets/azure_rm_subnet/meta/main.yml b/test/integration/targets/azure_rm_subnet/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_subnet/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_subnet/tasks/main.yml b/test/integration/targets/azure_rm_subnet/tasks/main.yml
deleted file mode 100644
index 8b4efcc4fe..0000000000
--- a/test/integration/targets/azure_rm_subnet/tasks/main.yml
+++ /dev/null
@@ -1,182 +0,0 @@
-- name: Create virtual network
- azure_rm_virtualnetwork:
- name: My_Virtual_Network
- address_prefixes_cidr:
- - 10.1.0.0/16
- - 172.100.0.0/16
- dns_servers:
- - 127.0.0.1
- - 127.0.0.3
- tags:
- testing: testing
- delete: on-exit
- resource_group: "{{ resource_group }}"
-
-- name: Create route table
- azure_rm_routetable:
- name: routetableforsubnet
- resource_group: "{{ resource_group }}"
- register: route_table
-
-- name: Remove subnet
- azure_rm_subnet:
- state: absent
- name: foobar
- virtual_network_name: My_Virtual_Network
- resource_group: "{{ resource_group }}"
-
-- name: Catch invalid cidr
- azure_rm_subnet:
- name: foobar
- virtual_network_name: My_Virtual_Network
- resource_group: "{{ resource_group }}"
- address_prefix_cidr: "10.1.0/24"
- register: output
- ignore_errors: yes
-
-- assert:
- that: output.failed
-
-- name: Add the subnet back
- azure_rm_subnet:
- name: foobar
- virtual_network_name: My_Virtual_Network
- resource_group: "{{ resource_group }}"
- address_prefix_cidr: "10.1.0.0/24"
- register: output
-
-- assert:
- that: output.changed
-
-- name: Add the subnet back (idempotent)
- azure_rm_subnet:
- name: foobar
- virtual_network_name: My_Virtual_Network
- resource_group: "{{ resource_group }}"
- register: output
-
-- assert:
- that: not output.changed
-
-- name: Create network security group
- azure_rm_securitygroup:
- name: secgroupfoo
- resource_group: "{{ resource_group }}"
- tags:
- testing: testing
-
-- name: Update the subnet
- azure_rm_subnet:
- name: foobar
- virtual_network_name: My_Virtual_Network
- resource_group: "{{ resource_group }}"
- address_prefix_cidr: "10.1.0.0/16"
- security_group: secgroupfoo
- service_endpoints:
- - service: Microsoft.Sql
- locations:
- - eastus
- - westus
-
-- name: Should be idempotent
- azure_rm_subnet:
- name: foobar
- virtual_network_name: My_Virtual_Network
- resource_group: "{{ resource_group }}"
- address_prefix_cidr: "10.1.0.0/16"
- service_endpoints:
- - service: Microsoft.Sql
- locations:
- - eastus
- - westus
- register: output
-
-- assert:
- that: not output.changed
-
-- name: Create network security group in another resource group
- azure_rm_securitygroup:
- name: secgroupfoo
- resource_group: "{{ resource_group_secondary }}"
- register: nsg
-
-- name: Update the subnet
- azure_rm_subnet:
- name: foobar
- virtual_network_name: My_Virtual_Network
- resource_group: "{{ resource_group }}"
- address_prefix_cidr: "10.1.0.0/16"
- route_table: "{{ route_table.id }}"
- security_group:
- name: secgroupfoo
- resource_group: "{{ resource_group_secondary }}"
- register: output
-
-- assert:
- that:
- - output.changed
- - output.state.network_security_group.id == nsg.state.id
-
-- name: Update the subnet (idempotent)
- azure_rm_subnet:
- name: foobar
- virtual_network_name: My_Virtual_Network
- resource_group: "{{ resource_group }}"
- address_prefix_cidr: "10.1.0.0/16"
- security_group: "{{ nsg.state.id }}"
- register: output
-
-- assert:
- that: not output.changed
-
-- name: Get subnet facts
- azure_rm_subnet_facts:
- name: foobar
- virtual_network_name: My_Virtual_Network
- resource_group: "{{ resource_group }}"
- register: output
-
-- debug:
- var: output
-- name: Assert that facts are returned
- assert:
- that:
- - output.changed == False
- - output.subnets[0]['id'] != None
- - output.subnets[0]['resource_group'] != None
- - output.subnets[0]['virtual_network_name'] != None
- - output.subnets[0]['name'] != None
- - output.subnets[0]['address_prefix_cidr'] != None
- - output.subnets[0]['route_table'] != None
- - output.subnets[0]['security_group'] != None
- - output.subnets[0]['provisioning_state'] != None
-
-- name: Remove subnet
- azure_rm_subnet:
- state: absent
- name: foobar
- virtual_network_name: My_Virtual_Network
- resource_group: "{{ resource_group }}"
-
-- name: Remove subnet (idempotent)
- azure_rm_subnet:
- state: absent
- name: foobar
- virtual_network_name: My_Virtual_Network
- resource_group: "{{ resource_group }}"
- register: output
-
-- assert:
- that: not output.changed
-
-- name: Remove security group
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: secgroupfoo
- state: absent
-
-- name: Remove virtual network
- azure_rm_virtualnetwork:
- name: My_Virtual_Network
- resource_group: "{{ resource_group }}"
- state: absent
diff --git a/test/integration/targets/azure_rm_trafficmanagerprofile/aliases b/test/integration/targets/azure_rm_trafficmanagerprofile/aliases
deleted file mode 100644
index a31676ea12..0000000000
--- a/test/integration/targets/azure_rm_trafficmanagerprofile/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/azure
-shippable/azure/group6
-destructive
diff --git a/test/integration/targets/azure_rm_trafficmanagerprofile/meta/main.yml b/test/integration/targets/azure_rm_trafficmanagerprofile/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_trafficmanagerprofile/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_trafficmanagerprofile/tasks/main.yml b/test/integration/targets/azure_rm_trafficmanagerprofile/tasks/main.yml
deleted file mode 100644
index dba81c0e25..0000000000
--- a/test/integration/targets/azure_rm_trafficmanagerprofile/tasks/main.yml
+++ /dev/null
@@ -1,289 +0,0 @@
-- name: Prepare random number
- set_fact:
- tmname: "tm{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- endpointname1: "ep1{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- endpointname2: "ep2{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-
-- name: Create a Traffic Manager profile (check mode)
- azure_rm_trafficmanagerprofile:
- resource_group: "{{ resource_group }}"
- name: "{{ tmname }}"
- tags:
- testing: testing
- delete: on-exit
- foo: bar
- location: global
- profile_status: enabled
- routing_method: performance
- dns_config:
- relative_name: "{{ tmname }}"
- ttl: 60
- monitor_config:
- protocol: HTTPS
- port: 80
- path: '/'
- check_mode: yes
-
-- name: Gather facts to check that no Traffic Manager profile was created
- azure_rm_trafficmanagerprofile_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ tmname }}"
- register: fact
-
-- name: Check there is no Traffic Manager profile created
- assert: { that: "{{ fact.tms | length }} == 0" }
-
-- name: Create a Traffic Manager profile
- azure_rm_trafficmanagerprofile:
- resource_group: "{{ resource_group }}"
- name: "{{ tmname }}"
- tags:
- testing: testing
- delete: on-exit
- foo: bar
- location: global
- profile_status: enabled
- routing_method: performance
- dns_config:
- relative_name: "{{ tmname }}"
- ttl: 60
- monitor_config:
- protocol: HTTPS
- port: 80
- path: '/'
- register: tm
-
-- name: Assert the Traffic Manager profile was created successfully
- assert:
- that:
- - tm.changed
-
-- name: Gather Traffic Manager profile facts
- azure_rm_trafficmanagerprofile_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ tmname }}"
- register: fact
-
-- name: Assert fact returns the created one
- assert:
- that:
- - "fact.tms | length == 1"
- - fact.tms[0].id == tm.id
- - fact.tms[0].endpoints | length == 0
-
-- name: Create a Traffic Manager profile (idempotent)
- azure_rm_trafficmanagerprofile:
- resource_group: "{{ resource_group }}"
- name: "{{ tmname }}"
- tags:
- testing: testing
- delete: on-exit
- foo: bar
- location: global
- profile_status: enabled
- routing_method: performance
- dns_config:
- relative_name: "{{ tmname }}"
- ttl: 60
- monitor_config:
- protocol: HTTPS
- port: 80
- path: '/'
- register: output
-
-- name: Assert idempotent
- assert:
- that:
- - not output.changed
-
-- name: Update the Traffic Manager profile
- azure_rm_trafficmanagerprofile:
- resource_group: "{{ resource_group }}"
- name: "{{ tmname }}"
- tags:
- testing: testing
- delete: on-exit
- foo: bar
- location: global
- profile_status: disabled
- routing_method: priority
- dns_config:
- relative_name: "{{ tmname }}"
- ttl: 60
- monitor_config:
- protocol: HTTPS
- port: 80
- path: '/'
- register: output
-
-- name: Assert the Traffic Manager profile is updated
- assert:
- that:
- - output.changed
-
-- name: Create Traffic Manager endpoint (check mode)
- azure_rm_trafficmanagerendpoint:
- resource_group: "{{ resource_group }}"
- profile_name: "{{ tmname }}"
- name: "{{ endpointname1 }}"
- type: external_endpoints
- location: westus
- priority: 2
- weight: 1
- target: 1.2.3.4
- check_mode: yes
- register: output
-
-- name: Assert check mode changed
- assert:
- that:
- - output.changed
-
-- name: Get endpoint
- azure_rm_trafficmanagerendpoint_facts:
- resource_group: "{{ resource_group }}"
- profile_name: "{{ tmname }}"
- register: facts
-
-- name: Check no endpoint created in check mode
- assert:
- that:
- - facts.endpoints | length == 0
-
-- name: Create Traffic Manager endpoint
- azure_rm_trafficmanagerendpoint:
- resource_group: "{{ resource_group }}"
- profile_name: "{{ tmname }}"
- name: "{{ endpointname1 }}"
- type: external_endpoints
- location: westus
- priority: 2
- weight: 1
- target: 1.2.3.4
- register: output
-
-- name: Assert endpoint create changed
- assert:
- that:
- - output.changed
-
-- name: Get endpoint
- azure_rm_trafficmanagerendpoint_facts:
- resource_group: "{{ resource_group }}"
- profile_name: "{{ tmname }}"
- register: facts
-
-- name: Check endpoint created
- assert:
- that:
- - facts.endpoints | length == 1
- - facts.endpoints[0].name == "{{ endpointname1 }}"
-
-- name: Create second Traffic Manager endpoint
- azure_rm_trafficmanagerendpoint:
- resource_group: "{{ resource_group }}"
- profile_name: "{{ tmname }}"
- name: "{{ endpointname2 }}"
- type: external_endpoints
- location: westus
- priority: 1
- weight: 3
- target: 4.3.2.1
-
-- name: Get endpoint
- azure_rm_trafficmanagerendpoint_facts:
- resource_group: "{{ resource_group }}"
- profile_name: "{{ tmname }}"
- register: facts
-
-- name: Check 2 endpoints in profile
- assert:
- that:
- - facts.endpoints | length == 2
-
-- name: Create endpoint (idempotent)
- azure_rm_trafficmanagerendpoint:
- resource_group: "{{ resource_group }}"
- profile_name: "{{ tmname }}"
- name: "{{ endpointname2 }}"
- type: external_endpoints
- location: westus
- priority: 1
- weight: 3
- target: 4.3.2.1
- register: output
-
-- name: Assert endpoint creation idempotent
- assert:
- that:
- - output.changed == False
-
-- name: Delete second endpoint
- azure_rm_trafficmanagerendpoint:
- resource_group: "{{ resource_group }}"
- profile_name: "{{ tmname }}"
- name: "{{ endpointname2 }}"
- type: external_endpoints
- state: absent
- register: output
-
-- name: Assert endpoint deletion changed
- assert:
- that:
- - output.changed
-
-- name: Get endpoint
- azure_rm_trafficmanagerendpoint_facts:
- resource_group: "{{ resource_group }}"
- profile_name: "{{ tmname }}"
- register: facts
-
-- name: Check 1 endpoint left in profile
- assert:
- that:
- - facts.endpoints | length == 1
-
-- name: Delete the Traffic Manager profile (check mode)
- azure_rm_trafficmanagerprofile:
- resource_group: "{{ resource_group }}"
- name: "{{ tmname }}"
- state: absent
- check_mode: yes
-
-- name: Gather Traffic Manager profile facts
- azure_rm_trafficmanagerprofile_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ tmname }}"
- register: fact
-
-- name: Assert the Traffic Manager profile is still there
- assert:
- that:
- - "fact.tms | length == 1"
- - fact.tms[0].id == tm.id
- - fact.tms[0].endpoints | length == 1
-
-- name: Delete the Traffic Manager profile
- azure_rm_trafficmanagerprofile:
- resource_group: "{{ resource_group }}"
- name: "{{ tmname }}"
- state: absent
- register: output
-
-- name: Assert the Traffic Manager profile was deleted
- assert:
- that:
- - output.changed
-
-- name: Get Traffic Manager profile fact
- azure_rm_trafficmanagerprofile_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ tmname }}"
- register: fact
-
-- name: Assert fact returns empty
- assert:
- that:
- - "fact.tms | length == 0"
diff --git a/test/integration/targets/azure_rm_virtualmachine/aliases b/test/integration/targets/azure_rm_virtualmachine/aliases
deleted file mode 100644
index a5238c1984..0000000000
--- a/test/integration/targets/azure_rm_virtualmachine/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group7
-destructive
-azure_rm_virtualmachine_facts
diff --git a/test/integration/targets/azure_rm_virtualmachine/inventory.yml b/test/integration/targets/azure_rm_virtualmachine/inventory.yml
deleted file mode 100644
index acd98ebf7f..0000000000
--- a/test/integration/targets/azure_rm_virtualmachine/inventory.yml
+++ /dev/null
@@ -1,66 +0,0 @@
-all:
- hosts:
- azure_test_invalid:
- azure_test_public_ip:
- network: 10.42.0.0/24
- subnet: 10.42.0.0/28
-
- azure_test_no_public_ip:
- network: 10.42.1.0/24
- subnet: 10.42.1.0/28
-
- azure_test_deallocate:
- network: 10.42.2.0/24
- subnet: 10.42.2.0/28
-
- azure_test_minimal:
- network: 10.42.3.0/24
- subnet: 10.42.3.0/28
-
- azure_test_dual_nic:
- network: 10.42.4.0/24
- subnet: 10.42.4.0/28
- secondary_network: 10.42.5.0/24
- secondary_subnet: 10.42.5.0/28
- nic_list:
- - name: "{{ 'int' ~ uid_short ~ '-1' }}"
- resource_group: "{{ resource_group_secondary }}"
- - name: "{{ 'int' ~ uid_short ~ '-2' }}"
- resource_group: "{{ resource_group_secondary }}"
-
- vars:
- ansible_connection: local
- ansible_python_interpreter: "{{ ansible_playbook_python }}"
-
- uid: "{{ (resource_group ~ inventory_hostname) | hash('md5') | truncate(18, True, '') }}"
- uid_short: "{{ (resource_group ~ inventory_hostname) | hash('md5') | truncate(10, True, '') }}"
-
- storage_account: "{{ 'stor' ~ uid }}"
- availability_set: "{{ 'avbs' ~ uid_short }}"
- vm_name: "{{ 'vm' ~ uid_short }}"
- network_name: "{{ 'vnet' ~ uid_short }}"
- subnet_name: "{{ 'snet' ~ uid_short }}"
- security_group: "{{ 'sg' ~ uid_short }}"
- public_ip_name: "{{ 'ip' ~ uid_short }}"
- interface_name: "{{ 'int' ~ uid_short }}"
-
- ssh_keys:
- - path: '/home/chouseknecht/.ssh/authorized_keys'
- key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC1igsIlcmTa/yfsJnTtnrEX7PP/a01gwbXcig6JOKyrUmJB8E6c/wtZwP115VSyDRTO6TEL/sBFUpkSw01zM8ydNATErh8meBlAlbnDq5NLhDXnMizgG0VNn0iLc/WplFTqkefsHXa8NtIxAtyEVIj/fKbK3XfBOdEpE3+MJYNtGlWyaod28W+5qmQPZDQys+YnE4OjSwN7D3g85/7dtLFvDH+lEC4ooJOaxVFr9VSMXUIkaRF6oI+R1Zu803LFSCTb4BfFOYOHPuQ/rEMP0KuUzggvP+TEBY14PEA2FoHOn+oRsT0ZR2+loGRaxSVqCQKaEHbNbkm+6Rllx2NQRO0BJxCSKRU1iifInLPxmSc4gvsHCKMAWy/tGkmKHPWIfN8hvwyDMK5MNBp/SJ1pVx4xuFDQjVWNbll0yk2+72uJgtFHHwEPK9QsOz45gX85vS3yhYCKrscS/W9h2l36SWwQXuGy4fXotE7esPsvNGAzBndHX1O8RMPg47qJXz059RyoGforoa9TnzIs3hIv+ts7ESx3OEq3HNk0FJ+wDka7IM7WQpGrVToJ0vfDy9Q46nw54vv5Zc/u4OZF3F5twHmyf3rLYKXRDuCvZQKT2iWQKVX6j63bq6orA5hwl22zndxWZNtOwtq8Sd0Ns0K/Fo/ggYDDGBtr68DwhA+MrxrHw== chouseknecht@ansible.com"
-
- image:
- offer: CentOS
- publisher: OpenLogic
- sku: '7.1'
- version: latest
-
- image_paid:
- publisher: cognosys
- offer: ubuntu-14-04-lts
- sku: hardened-ubuntu-14-04
- version: latest
-
- plan_paid:
- name: hardened-ubuntu-14-04
- product: ubuntu-14-04-lts
- publisher: cognosys
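
The inventory above derives collision-free resource names per host by hashing the resource group and hostname into uid / uid_short and prefixing them (vm, vnet, stor, ...). A small illustrative task, not part of the original suite, that would print the derived names for the current host:

- name: Show derived resource names (illustrative sketch)
  debug:
    msg: "vm={{ vm_name }} vnet={{ network_name }} storage={{ storage_account }}"
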
diff --git a/test/integration/targets/azure_rm_virtualmachine/main.yml b/test/integration/targets/azure_rm_virtualmachine/main.yml
deleted file mode 100644
index 7722487bff..0000000000
--- a/test/integration/targets/azure_rm_virtualmachine/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-- name: Run Azure VM tests in parallel
- hosts: all
- gather_facts: no
- strategy: free
- tasks:
- - name: Include tasks based on inventory hostname
- include_tasks: tasks/{{ inventory_hostname }}.yml
diff --git a/test/integration/targets/azure_rm_virtualmachine/runme.sh b/test/integration/targets/azure_rm_virtualmachine/runme.sh
deleted file mode 100755
index c7895c9d26..0000000000
--- a/test/integration/targets/azure_rm_virtualmachine/runme.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env bash
-
-set -eux
-
-ansible-playbook -i inventory.yml main.yml "$@"
diff --git a/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_deallocate.yml b/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_deallocate.yml
deleted file mode 100644
index 52fc0102e8..0000000000
--- a/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_deallocate.yml
+++ /dev/null
@@ -1,90 +0,0 @@
-- include_tasks: setup.yml
-
-- name: Create minimal VM with defaults
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- admin_username: "testuser"
- admin_password: "Pass123$$$abx!"
- vm_size: Standard_A0
- virtual_network: "{{ network_name }}"
- image:
- offer: UbuntuServer
- publisher: Canonical
- sku: 16.04-LTS
- version: latest
- register: vm_output
-
-- name: Restart the virtual machine
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- restarted: yes
- vm_size: Standard_A0
- register: restart_result
-
-- name: Ensure VM was restarted
- assert:
- that:
- - "azure_vm.powerstate in ['starting', 'running']"
- - restart_result is changed
-
-- name: Deallocate the virtual machine
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- allocated: no
- vm_size: Standard_A0
- register: deallocate_result
-
-- name: Ensure VM was deallocated
- assert:
- that:
- - azure_vm.powerstate == 'deallocated'
- - deallocate_result is changed
-
-- name: Start the virtual machine
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- vm_size: Standard_A0
- register: start_result
-
-- name: Ensure VM was started
- assert:
- that:
- - "azure_vm.powerstate in ['starting', 'running']"
- - start_result is changed
-
-- name: Delete VM
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- state: absent
- remove_on_absent: all_autocreated
-
-- name: Destroy subnet
- azure_rm_subnet:
- resource_group: "{{ resource_group }}"
- virtual_network: "{{ network_name }}"
- name: "{{ subnet_name }}"
- state: absent
-
-- name: Destroy virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: "{{ network_name }}"
- state: absent
-
-- name: Destroy availability set
- azure_rm_availabilityset:
- resource_group: "{{ resource_group }}"
- name: "{{ availability_set }}"
- state: absent
-
-- name: Destroy storage account
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account }}"
- force_delete_nonempty: yes
- state: absent
diff --git a/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_dual_nic.yml b/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_dual_nic.yml
deleted file mode 100644
index a5bbbad9a2..0000000000
--- a/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_dual_nic.yml
+++ /dev/null
@@ -1,131 +0,0 @@
-- include_tasks: setup.yml
-
-- name: Create virtual network in secondary resource group
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group_secondary }}"
- name: "{{ network_name ~ '-2' }}"
- address_prefixes: "{{ secondary_network }}"
- register: create_virt_net_result
-
-- name: Create subnet in secondary resource group
- azure_rm_subnet:
- resource_group: "{{ resource_group_secondary }}"
- name: "{{ subnet_name ~ '-2' }}"
- address_prefix: "{{ secondary_subnet }}"
- virtual_network: "{{ network_name ~ '-2' }}"
-
-- name: Create NICs for dual NIC VM in secondary resource group
- azure_rm_networkinterface:
- resource_group: "{{ item.resource_group }}"
- name: "{{ item.name }}"
- virtual_network: "{{ network_name ~ '-2' }}"
- subnet: "{{ subnet_name ~ '-2' }}"
- loop: "{{ nic_list }}"
-
-- name: Create virtual machine with two NICs
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}" # Should this be resource_group_secondary?
- name: "{{ vm_name }}"
- vm_size: Standard_A0
- storage_account: "{{ storage_account }}"
- storage_container: "{{ vm_name }}"
- storage_blob: "{{ vm_name }}.vhd"
- admin_username: adminuser
- admin_password: Password123!
- short_hostname: testvm
- os_type: Linux
- os_disk_size_gb: 64
- os_disk_name: testosdiskxx
- network_interfaces: "{{ nic_list }}"
- availability_set: "{{ availability_set }}"
- image:
- offer: UbuntuServer
- publisher: Canonical
- sku: 16.04-LTS
- version: latest
- tags:
- abc: def
-
-- name: Ensure VM was created properly
- assert:
- that:
- - azure_vm.properties.availabilitySet.id
- - azure_vm.properties.storageProfile.osDisk.name == 'testosdiskxx'
-
-- name: Retrieve VM facts (filtering by name)
- azure_rm_virtualmachine_facts:
- resource_group: "{{ resource_group }}" # Should this be resource_group_secondary?
- name: "{{ vm_name }}"
- register: vm_facts_results
-
-- name: Ensure facts module returned the dual NIC VM
- assert:
- that:
- - vm_facts_results.vms | length == 1
- - vm_facts_results.vms[0].name == "{{ vm_name }}"
- - vm_facts_results.vms[0].location
- - vm_facts_results.vms[0].admin_username == 'adminuser'
- - vm_facts_results.vms[0].resource_group == "{{ resource_group }}"
- - vm_facts_results.vms[0].power_state != None
-
-- name: Retrieve facts by tags
- azure_rm_virtualmachine_facts:
- tags:
- - abc:def
- register: facts_by_tags_results
-
-- name: Assert that facts module returned the tagged VM
- assert:
- that:
- - facts_by_tags_results.vms | length >= 1
-
-- name: Should be idempotent with dual NICs
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}" # Should this be resource_group_secondary?
- name: "{{ vm_name }}"
- vm_size: Standard_A0
- storage_account: "{{ storage_account }}"
- storage_container: "{{ vm_name }}"
- storage_blob: "{{ vm_name }}.vhd"
- admin_username: adminuser
- admin_password: Password123!
- short_hostname: testvm
- os_type: Linux
- os_disk_size_gb: 64
- network_interfaces: "{{ nic_list }}"
- availability_set: "{{ availability_set }}"
- image:
- offer: UbuntuServer
- publisher: Canonical
- sku: 16.04-LTS
- version: latest
- register: dual_nics_result
-
-- name: Ensure nothing changed
- assert:
- that: dual_nics_result is not changed
-
-- name: Generalize VM
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}" # Should this be resource_group_secondary?
- name: "{{ vm_name }}"
- generalized: yes
-
-- name: Gather facts and check if machine is generalized
- azure_rm_virtualmachine_facts:
- resource_group: "{{ resource_group }}" # Should this be resource_group_secondary?
- name: "{{ vm_name }}"
- register: generalized_output
-
-- name: Ensure power state is generalized
- assert:
- that: generalized_output.vms[0].power_state == 'generalized'
-
-- name: Delete dual NIC VM
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}" # Should this be resource_group_secondary?
- name: "{{ vm_name }}"
- state: absent
- vm_size: Standard_A0
- async: 5000
- poll: 0
diff --git a/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_invalid.yml b/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_invalid.yml
deleted file mode 100644
index 86ec72de1f..0000000000
--- a/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_invalid.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-# TODO: Until we have a module to create/delete images these are the best tests I can do
-- name: Assert error thrown with invalid image dict
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- image:
- offer: UbuntuServer
- register: fail_invalid_image_dict
- failed_when: 'fail_invalid_image_dict.msg != "parameter error: expecting image to contain [publisher, offer, sku, version], [name, resource_group] or [id]"'
-
-- name: Assert error thrown with invalid image type
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- image:
- - testing
- register: fail_invalid_image_type
- failed_when: 'fail_invalid_image_type.msg != "parameter error: expecting image to be a string or dict not list"'
-
-- name: Assert error finding missing custom image
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- image: invalid-image
- register: fail_missing_custom_image
- failed_when: fail_missing_custom_image.msg != "Error could not find image with name invalid-image"
-
-- name: Assert error finding missing custom image (dict style)
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- image:
- name: invalid-image
- register: fail_missing_custom_image_dict
- failed_when: fail_missing_custom_image_dict.msg != "Error could not find image with name invalid-image"
diff --git a/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_minimal.yml b/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_minimal.yml
deleted file mode 100644
index 8f2767f34e..0000000000
--- a/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_minimal.yml
+++ /dev/null
@@ -1,118 +0,0 @@
-- include_tasks: setup.yml
-
-# # Tests possible when CI user account is set up with required authority
-# - name: Create virtual machine with image and plan which requires acceptance of terms
-# azure_rm_virtualmachine:
-# resource_group: "{{ resource_group }}"
-# name: testvm009
-# vm_size: Standard_A0
-# storage_account: "{{ storage_account }}"
-# storage_container: testvm001
-# storage_blob: testvm003.vhd
-# admin_username: adminuser
-# admin_password: Password123!
-# short_hostname: testvm
-# os_type: Linux
-# availability_set: "{{ availability_set }}"
-# image: "{{ image_paid }}"
-# plan_paid: "{{ plan_paid }}"
-# register: create_image_plan_result
-
-# - assert:
-# that:
-# - create_image_plan_result is changed
-# - create_image_plan_result.ansible_facts.azure_vm.properties.storageProfile.imageReference.publisher == image_paid.publisher
-
-# - name: Should be idempotent with image and plan which requires acceptance of terms
-# azure_rm_virtualmachine:
-# resource_group: "{{ resource_group }}"
-# name: testvm009
-# vm_size: Standard_A0
-# storage_account: "{{ storage_account }}"
-# storage_container: testvm001
-# storage_blob: testvm003.vhd
-# admin_username: adminuser
-# admin_password: Password123!
-# short_hostname: testvm
-# os_type: Linux
-# availability_set: "{{ availability_set }}"
-# image: "{{ image_paid }}"
-# plan_paid: "{{ plan_paid }}"
-# register: create_image_plan_again_result
-
-# - assert:
-# that: create_image_plan_again is not changed
-
-- name: Create minimal VM with defaults
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- admin_username: "testuser"
- admin_password: "Pass123$$$abx!"
- vm_size: Standard_B1ms
- virtual_network: "{{ network_name }}"
- image:
- offer: UbuntuServer
- publisher: Canonical
- sku: 16.04-LTS
- version: latest
- register: vm_output
-
-- name: Delete VM
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- remove_on_absent: all_autocreated
- state: absent
-
-- name: Query auto created NIC
- azure_rm_networkinterface_info:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}01"
- register: nic_result
-
-- name: Query auto created security group
- azure_rm_securitygroup_info:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}01"
- register: nsg_result
-
-- name: Query auto created public IP
- azure_rm_publicipaddress_info:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}01"
- register: pip_result
-
-- name: Assert that autocreated resources were deleted
- assert:
- that:
- # what about the default storage group?
- - nic_result.networkinterfaces | length == 0
- - nsg_result.securitygroups | length == 0
- - pip_result.publicipaddresses | length == 0
-
-- name: Destroy subnet
- azure_rm_subnet:
- resource_group: "{{ resource_group }}"
- virtual_network: "{{ network_name }}"
- name: "{{ subnet_name }}"
- state: absent
-
-- name: Destroy virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: "{{ network_name }}"
- state: absent
-
-- name: Destroy availability set
- azure_rm_availabilityset:
- resource_group: "{{ resource_group }}"
- name: "{{ availability_set }}"
- state: absent
-
-- name: Destroy storage account
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account }}"
- force_delete_nonempty: yes
- state: absent
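
The minimal scenario above also verifies that remove_on_absent: all_autocreated removes the NIC, security group and public IP the module created alongside the VM. A sketch of that delete against the migrated collection, assuming the option names are unchanged in azure.azcollection (resource names are placeholders):

- name: Delete a VM together with its auto-created resources (post-migration sketch)
  azure.azcollection.azure_rm_virtualmachine:
    resource_group: myResourceGroup
    name: myVM
    remove_on_absent: all_autocreated
    state: absent
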
diff --git a/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_no_public_ip.yml b/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_no_public_ip.yml
deleted file mode 100644
index f06eea8524..0000000000
--- a/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_no_public_ip.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-- include_tasks: setup.yml
-
-- name: Create virtual machine without public ip address and with boot diagnostics enabled
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- vm_size: Standard_A0
- admin_username: adminuser
- admin_password: Password123!
- short_hostname: testvm
- os_type: Linux
- public_ip_allocation_method: Disabled
- storage_account_name: "{{ storage_account }}"
- availability_set: "{{ availability_set }}"
- virtual_network: "{{ network_name }}"
- boot_diagnostics:
- enabled: yes
- image:
- offer: UbuntuServer
- publisher: Canonical
- sku: 16.04-LTS
- version: latest
- register: create_vm_public_result
-
-- name: Ensure VM was created properly
- assert:
- that:
- - azure_vm.properties.diagnosticsProfile.bootDiagnostics.enabled
- - azure_vm.properties.diagnosticsProfile.bootDiagnostics.storageUri is defined
- - azure_vm.properties.instanceView.bootDiagnostics.consoleScreenshotBlobUri is defined
- - azure_vm.properties.instanceView.bootDiagnostics.serialConsoleLogBlobUri is defined
- - not 'publicIPAddress' in create_vm_public_result.ansible_facts.azure_vm.properties.networkProfile.networkInterfaces[0].properties.ipConfigurations[0].properties
-
-- name: Delete VM with no public ip
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- state: absent
- remove_on_absent: all_autocreated
- async: 5000
- poll: 0
diff --git a/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_public_ip.yml b/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_public_ip.yml
deleted file mode 100644
index f25d944a78..0000000000
--- a/test/integration/targets/azure_rm_virtualmachine/tasks/azure_test_public_ip.yml
+++ /dev/null
@@ -1,311 +0,0 @@
-- include_tasks: setup.yml
-
-- name: Create public ip
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- allocation_method: Static
- name: "{{ public_ip_name }}"
-
-- name: Create security group
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ security_group }}"
- purge_rules: yes
- rules:
- - name: ALLOW_SSH
- protocol: Tcp
- destination_port_range: 22
- access: Allow
- priority: 100
- direction: Inbound
-
- - name: ALLOW_HTTP
- protocol: Tcp
- destination_port_range: 80
- access: Allow
- priority: 110
- direction: Inbound
-
-- name: Create network interface
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "{{ interface_name }}"
- virtual_network: "{{ network_name }}"
- subnet: "{{ subnet_name }}"
- public_ip_name: "{{ public_ip_name }}"
- security_group: "{{ security_group }}"
-
-- name: Create virtual machine with a single NIC and no boot diagnostics
- register: output
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- vm_size: Standard_A0
- storage_account: "{{ storage_account }}"
- storage_container: "{{ vm_name }}"
- storage_blob: "{{ vm_name }}.vhd"
- admin_username: adminuser
- admin_password: Password123!
- short_hostname: testvm
- os_type: Linux
- network_interfaces: "{{ interface_name }}"
- availability_set: "{{ availability_set }}"
- image:
- offer: UbuntuServer
- publisher: Canonical
- sku: 16.04-LTS
- version: latest
- custom_data: |
- #!/bin/sh
- echo "custom_data was executed" > /tmp/custom_data.txt
-
-- name: Ensure VM was created properly
- assert:
- that:
- - azure_vm.properties.provisioningState == 'Succeeded'
- - azure_vm.properties.availabilitySet.id
- # initial response from creation has no diagnosticsProfile
- # if you run it again however, there is one in the response
- # so we handle both cases
- - "'diagnosticsProfile' not in azure_vm.properties or not azure_vm.properties.diagnosticsProfile.bootDiagnostics.enabled"
-
-- name: Get facts for virtual machine with boot diagnostics disabled
- azure_rm_virtualmachine_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- register: vm_facts_no_boot_diag_result
-
-- name: Ensure VM facts are correct
- assert:
- that:
- - vm_facts_no_boot_diag_result.vms != []
- - not vm_facts_no_boot_diag_result.vms[0].boot_diagnostics.enabled
- - not vm_facts_no_boot_diag_result.vms[0].boot_diagnostics.storage_uri
-
-- name: Enable boot diagnostics on an existing VM for the first time without specifying a storage account
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- boot_diagnostics:
- enabled: yes
- # without specifying storage_account you get a new default storage account for the VM
-
-- name: Ensure VM properties are correct
- assert:
- that:
- - azure_vm.properties.diagnosticsProfile.bootDiagnostics.enabled
- - azure_vm.properties.diagnosticsProfile.bootDiagnostics.storageUri is defined
- - azure_vm.properties.instanceView.bootDiagnostics.consoleScreenshotBlobUri is defined
- - azure_vm.properties.instanceView.bootDiagnostics.serialConsoleLogBlobUri is defined
-
-- name: Get facts for virtual machine with boot diagnostics enabled
- azure_rm_virtualmachine_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- register: vm_facts_boot_diag_result
-
-- name: Ensure VM facts were returned
- assert:
- that:
- - vm_facts_boot_diag_result.vms != []
- - vm_facts_boot_diag_result.vms[0].boot_diagnostics.enabled
- - vm_facts_boot_diag_result.vms[0].boot_diagnostics.storage_uri is defined
- - vm_facts_boot_diag_result.vms[0].boot_diagnostics.console_screenshot_uri is defined
- - vm_facts_boot_diag_result.vms[0].boot_diagnostics.serial_console_log_uri is defined
-
-- name: Change the boot diagnostics storage account while enabled
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- boot_diagnostics:
- enabled: yes
- storage_account: "{{ storage_account }}"
- ignore_errors: yes
-
-- name: Disable boot diagnostics and change the storage account at the same time
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- boot_diagnostics:
- enabled: no
- storage_account: "{{ storage_account }}"
-
-- name: Ensure boot diagnostics was disabled
- assert:
- that:
- - not azure_vm.properties.diagnosticsProfile.bootDiagnostics.enabled
-
-- name: Re-enable boot diagnostics on an existing VM where it was previously configured
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- boot_diagnostics:
- enabled: yes
- register: reenable_boot_diag_result
-
-- name: Ensure boot diagnostics was reenabled
- assert:
- that:
- - azure_vm.properties.diagnosticsProfile.bootDiagnostics.enabled
- - azure_vm.properties.diagnosticsProfile.bootDiagnostics.storageUri is defined
- - azure_vm.properties.instanceView.bootDiagnostics.consoleScreenshotBlobUri is defined
- - azure_vm.properties.instanceView.bootDiagnostics.serialConsoleLogBlobUri is defined
-
-# - add_host:
-# name: new_azure_vm
-# ansible_host: '{{ reenable_boot_diag_result.ansible_facts.azure_vm.properties.networkProfile.networkInterfaces[0].properties.ipConfigurations[0].properties.publicIPAddress.properties.ipAddress }}'
-# ansible_connection: paramiko # not guaranteed to have sshpass...
-# ansible_user: adminuser
-# ansible_password: Password123!
-# ansible_host_key_checking: no
-
-# - name: wait for SSH port to be open
-# wait_for:
-# host: '{{ hostvars["new_azure_vm"].ansible_host }}'
-# port: 22
-# timeout: 60
-# state: started
-
-# # TODO: figure out how to make this work under ansible-test with the coverage injector
-# - block:
-# - name: wait for host to answer on SSH
-# delegate_to: new_azure_vm
-# wait_for_connection:
-
-# - name: get content from custom_data script
-# raw: cat /tmp/custom_data.txt
-# register: custom_data_content
-
-# - name: assert contents
-# assert:
-# that: custom_data_content.stdout | regex_search('custom_data was executed')
-# delegate_to: new_azure_vm
-
-# # TODO: figure out how to make this work under ansible-test with the coverage injector
-# - name: wait for file/content created by custom_data script
-# delegate_to: new_azure_vm
-# vars:
-# ansible_python_interpreter: python
-# wait_for:
-# path: /tmp/custom_data.txt
-# search_regex: ^custom_data was executed$
-# timeout: 20
-
-- name: Should be idempotent with a single NIC
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- vm_size: Standard_A0
- storage_account: "{{ storage_account }}"
- storage_container: "{{ vm_name }}"
- storage_blob: "{{ vm_name }}.vhd"
- admin_username: adminuser
- admin_password: Password123!
- short_hostname: testvm
- os_type: Linux
- network_interfaces: "{{ interface_name }}"
- image:
- offer: UbuntuServer
- publisher: Canonical
- sku: 16.04-LTS
- version: latest
- register: single_nic_result
-
-- name: Ensure nothing changed
- assert:
- that: single_nic_result is not changed
-
-- name: Resize VM
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- vm_size: Standard_A1
- storage_account: "{{ storage_account }}"
- storage_container: "{{ vm_name }}"
- storage_blob: "{{ vm_name }}.vhd"
- admin_username: adminuser
- admin_password: Password123!
- short_hostname: testvm
- os_type: Linux
- network_interfaces: "{{ interface_name }}"
- image:
- offer: UbuntuServer
- publisher: Canonical
- sku: 16.04-LTS
- version: latest
- register: resize_result
-
-- name: Ensure VM was resized
- assert:
- that:
- - resize_result is changed
- - resize_result.ansible_facts.azure_vm.properties.hardwareProfile.vmSize == "Standard_A1"
-
-- name: Delete VM
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "{{ vm_name }}"
- state: absent
- vm_size: Standard_A0
-
-- name: NIC should be gone
- azure_rm_networkinterface_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ interface_name }}"
-
-- name: Ensure NIC was removed
- assert:
- that: azure_networkinterfaces | length == 0
-
-- name: Public IP should be gone
- azure_rm_publicipaddress_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ public_ip_name }}"
-
-- name: Ensure public IP was removed
- assert:
- that: azure_publicipaddresses | length == 0
-
-- name: Destroy NIC
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "{{ interface_name }}"
- state: absent
-
-- name: Destroy security group
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: "{{ security_group }}"
- state: absent
-
-- name: Destroy subnet
- azure_rm_subnet:
- resource_group: "{{ resource_group }}"
- virtual_network: "{{ network_name }}"
- name: "{{ subnet_name }}"
- state: absent
-
-- name: Destroy virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: "{{ network_name }}"
- state: absent
-
-- name: Destroy public ip
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- name: "{{ public_ip_name }}"
- state: absent
-
-- name: Destroy availability set
- azure_rm_availabilityset:
- resource_group: "{{ resource_group }}"
- name: "{{ availability_set }}"
- state: absent
-
-- name: Destroy storage account
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account }}"
- force_delete_nonempty: true
- state: absent
diff --git a/test/integration/targets/azure_rm_virtualmachine/tasks/setup.yml b/test/integration/targets/azure_rm_virtualmachine/tasks/setup.yml
deleted file mode 100644
index f053cac03d..0000000000
--- a/test/integration/targets/azure_rm_virtualmachine/tasks/setup.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-- debug:
- msg: "UID is {{ uid_short }}"
-
-- name: SETUP | Create storage account
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account }}"
- account_type: Standard_LRS
-
-- name: SETUP | Create availability set
- azure_rm_availabilityset:
- name: "{{ availability_set }}"
- resource_group: "{{ resource_group }}"
-
-- name: SETUP | Create virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: "{{ network_name }}"
- address_prefixes: "{{ network }}"
-
-- name: SETUP | Create subnet
- azure_rm_subnet:
- resource_group: "{{ resource_group }}"
- name: "{{ subnet_name }}"
- address_prefix: "{{ subnet }}"
- virtual_network: "{{ network_name }}"
diff --git a/test/integration/targets/azure_rm_virtualmachineextension/aliases b/test/integration/targets/azure_rm_virtualmachineextension/aliases
deleted file mode 100644
index a66cfb4dbb..0000000000
--- a/test/integration/targets/azure_rm_virtualmachineextension/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group5
-destructive
-azure_rm_virtualmachineextension_facts
diff --git a/test/integration/targets/azure_rm_virtualmachineextension/meta/main.yml b/test/integration/targets/azure_rm_virtualmachineextension/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_virtualmachineextension/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_virtualmachineextension/tasks/main.yml b/test/integration/targets/azure_rm_virtualmachineextension/tasks/main.yml
deleted file mode 100644
index e195ac731a..0000000000
--- a/test/integration/targets/azure_rm_virtualmachineextension/tasks/main.yml
+++ /dev/null
@@ -1,181 +0,0 @@
-- name: Create Random Storage Account Name
- set_fact:
- storage_account: "{{ resource_group | hash('md5') | truncate(24, True, '') }}"
-
-- name: Create virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: testVnet
- address_prefixes: "10.0.0.0/16"
-
-- name: Add subnet
- azure_rm_subnet:
- resource_group: "{{ resource_group }}"
- name: testSubnet
- address_prefix: "10.0.1.0/24"
- virtual_network: testVnet
-
-- name: Create public IP address
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- allocation_method: Dynamic
- name: testPublicIP
-
-- name: Create Network Security Group that allows SSH
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: testNetworkSecurityGroup
- rules:
- - name: SSH
- protocol: Tcp
- destination_port_range: 22
- access: Allow
- priority: 1001
- direction: Inbound
-
-- name: Create virtual network interface card
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: testNIC
- virtual_network: testVnet
- subnet: testSubnet
- public_ip_name: testPublicIP
- security_group_name: testNetworkSecurityGroup
-
-- name: create a storage account
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account }}"
- type: Standard_LRS
-
-- name: Create VM
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: testVM
- vm_size: Standard_DS1_v2
- admin_username: testuser
- ssh_password_enabled: true
- admin_password: "Password1234!"
- network_interfaces: testNIC
- storage_account_name: "{{ storage_account }}"
- storage_container: osdisk
- storage_blob: osdisk.vhd
- os_disk_caching: ReadWrite
- image:
- offer: CoreOS
- publisher: CoreOS
- sku: Stable
- version: latest
-
-- name: Create VM Extension
- azure_rm_virtualmachineextension:
- resource_group: "{{ resource_group }}"
- name: testVMExtension
- virtual_machine_name: testVM
- publisher: Microsoft.Azure.Extensions
- virtual_machine_extension_type: CustomScript
- type_handler_version: 2.0
- auto_upgrade_minor_version: true
- settings: {"commandToExecute": "hostname"}
- register: results
-
-- name: Assert that VM Extension ran
- assert:
- that: results.changed
-
-- name: Query extension
- azure_rm_virtualmachineextension_facts:
- resource_group: "{{ resource_group }}"
- name: testVMExtension
- virtual_machine_name: testVM
- register: results
-- name: Assert that facts are returned
- assert:
- that:
- - results.changed == False
- - results.extensions[0]['id'] != None
- - results.extensions[0]['resource_group'] != None
- - results.extensions[0]['virtual_machine_name'] != None
- - results.extensions[0]['name'] != None
- - results.extensions[0]['location'] != None
- - results.extensions[0]['publisher'] != None
- - results.extensions[0]['type'] != None
- - results.extensions[0]['settings'] != None
- - results.extensions[0]['auto_upgrade_minor_version'] != None
- - results.extensions[0]['provisioning_state'] != None
-
-- name: List extensions
- azure_rm_virtualmachineextension_facts:
- resource_group: "{{ resource_group }}"
- virtual_machine_name: testVM
- register: results
-- name: Assert that facts are returned
- assert:
- that:
- - results.changed == False
- - results.extensions[0]['id'] != None
- - results.extensions[0]['resource_group'] != None
- - results.extensions[0]['virtual_machine_name'] != None
- - results.extensions[0]['name'] != None
- - results.extensions[0]['location'] != None
- - results.extensions[0]['publisher'] != None
- - results.extensions[0]['type'] != None
- - results.extensions[0]['settings'] != None
- - results.extensions[0]['auto_upgrade_minor_version'] != None
- - results.extensions[0]['provisioning_state'] != None
-
-- name: Delete VM Extension
- azure_rm_virtualmachineextension:
- resource_group: "{{ resource_group }}"
- name: testVMExtension
- virtual_machine_name: testVM
- state: absent
- publisher: Microsoft.Azure.Extensions
- virtual_machine_extension_type: CustomScript
- type_handler_version: 2.0
- auto_upgrade_minor_version: true
- settings: {"commandToExecute": "hostname"}
- register: results
-
-- name: Assert that VM Extension deleted
- assert:
- that: results.changed
-
-- name: Delete VM
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: testVM
- state: absent
- remove_on_absent: ['all']
- vm_size: Standard_DS1_v2
- admin_username: testuser
- network_interfaces: testNIC
- storage_container: osdisk
- storage_blob: osdisk.vhd
- os_disk_caching: ReadWrite
- image:
- offer: CoreOS
- publisher: CoreOS
- sku: Stable
- version: latest
-
-- name: Delete a storage account
- azure_rm_storageaccount:
- resource_group: "{{ resource_group }}"
- name: "{{ storage_account }}"
- type: Standard_LRS
- state: absent
- force_delete_nonempty: true
-
-- name: Delete Network Security Group that allows SSH
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: testNetworkSecurityGroup
- state: absent
-
-- name: Delete virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: testVnet
- state: absent
- address_prefixes: "10.0.0.0/16"
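
The extension test above installs the Microsoft.Azure.Extensions CustomScript handler on a VM and then removes it. A minimal sketch of the install step against the migrated collection, assuming azure.azcollection keeps the same parameters (resource names are placeholders):

- name: Run a command via the CustomScript VM extension (post-migration sketch)
  azure.azcollection.azure_rm_virtualmachineextension:
    resource_group: myResourceGroup
    name: myExtension
    virtual_machine_name: myVM
    publisher: Microsoft.Azure.Extensions
    virtual_machine_extension_type: CustomScript
    type_handler_version: '2.0'
    auto_upgrade_minor_version: true
    settings: {"commandToExecute": "hostname"}
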
diff --git a/test/integration/targets/azure_rm_virtualmachineimage_info/aliases b/test/integration/targets/azure_rm_virtualmachineimage_info/aliases
deleted file mode 100644
index 9175999b45..0000000000
--- a/test/integration/targets/azure_rm_virtualmachineimage_info/aliases
+++ /dev/null
@@ -1,4 +0,0 @@
-cloud/azure
-shippable/azure/group3
-shippable/azure/smoketest
-destructive
diff --git a/test/integration/targets/azure_rm_virtualmachineimage_info/meta/main.yml b/test/integration/targets/azure_rm_virtualmachineimage_info/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_virtualmachineimage_info/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_virtualmachineimage_info/tasks/main.yml b/test/integration/targets/azure_rm_virtualmachineimage_info/tasks/main.yml
deleted file mode 100644
index 5d16141dfa..0000000000
--- a/test/integration/targets/azure_rm_virtualmachineimage_info/tasks/main.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-- name: set location
- set_fact:
- location: eastus
-
-- name: Get facts for a specific image
- azure_rm_virtualmachineimage_info:
- location: "{{ location }}"
- publisher: OpenLogic
- offer: CentOS
- sku: '7.3'
- version: '7.3.20170707'
- register: output
-
-- assert:
- that: output['vmimages'] | length == 1
-
-- name: List available versions
- azure_rm_virtualmachineimage_info:
- location: "{{ location }}"
- publisher: OpenLogic
- offer: CentOS
- sku: '7.3'
- register: output
-
-- assert:
- that: output['vmimages'] | length > 0
-
-- name: List available offers
- azure_rm_virtualmachineimage_info:
- location: "{{ location }}"
- publisher: OpenLogic
- register: output
-
-- assert:
- that: output['vmimages'] | length > 0
-
-- name: List available publishers
- azure_rm_virtualmachineimage_info:
- location: "{{ location }}"
- register: output
-
-- assert:
- that: output['vmimages'] | length > 0
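
The facts test above narrows the query step by step: location alone lists publishers, adding publisher lists offers, adding offer and sku lists versions, and supplying all five fields returns exactly one image. A sketch of the most specific query against the migrated collection, assuming azure.azcollection keeps the _info module and its parameters:

- name: Get facts for one specific image (post-migration sketch)
  azure.azcollection.azure_rm_virtualmachineimage_info:
    location: eastus
    publisher: OpenLogic
    offer: CentOS
    sku: '7.3'
    version: '7.3.20170707'
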
diff --git a/test/integration/targets/azure_rm_virtualmachinescaleset/aliases b/test/integration/targets/azure_rm_virtualmachinescaleset/aliases
deleted file mode 100644
index 00187fa67a..0000000000
--- a/test/integration/targets/azure_rm_virtualmachinescaleset/aliases
+++ /dev/null
@@ -1,7 +0,0 @@
-cloud/azure
-shippable/azure/group10
-destructive
-azure_rm_virtualmachinescaleset_facts
-azure_rm_virtualmachinescalesetinstance_facts
-azure_rm_virtualmachinescalesetextension
-azure_rm_virtualmachinescalesetextension_facts
diff --git a/test/integration/targets/azure_rm_virtualmachinescaleset/meta/main.yml b/test/integration/targets/azure_rm_virtualmachinescaleset/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_virtualmachinescaleset/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_virtualmachinescaleset/tasks/main.yml b/test/integration/targets/azure_rm_virtualmachinescaleset/tasks/main.yml
deleted file mode 100644
index 67a0b93802..0000000000
--- a/test/integration/targets/azure_rm_virtualmachinescaleset/tasks/main.yml
+++ /dev/null
@@ -1,617 +0,0 @@
-- name: Prepare random number
- set_fact:
- rpfx: "{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- run_once: yes
-
-- name: Create virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: testVnet
- address_prefixes: "10.0.0.0/16"
-
-- name: Add subnet
- azure_rm_subnet:
- resource_group: "{{ resource_group }}"
- name: testSubnet
- address_prefix: "10.0.1.0/24"
- virtual_network: testVnet
-
-- name: Create public IP address
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- allocation_method: Static
- name: testPublicIP
-
-- name: Create load balancer
- azure_rm_loadbalancer:
- resource_group: "{{ resource_group }}"
- name: testLB
- public_ip_address_name: testPublicIP
-
-- name: Create public IP address 1
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- allocation_method: Static
- name: testPublicIP1
-
-- name: Create load balancer 1
- azure_rm_loadbalancer:
- resource_group: "{{ resource_group }}"
- name: testLB1
- public_ip_address_name: testPublicIP1
-
-- name: Create network security group in the same resource group as the VMSS.
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: testNetworkSecurityGroup
-
-- name: Create network security group in a different resource group from the VMSS.
- azure_rm_securitygroup:
- resource_group: "{{ resource_group_secondary }}"
- name: testNetworkSecurityGroup2
-
-- name: Create virtual network interface card for the image source VM
- azure_rm_networkinterface:
- resource_group: "{{ resource_group }}"
- name: "vmforimage{{ rpfx }}nic"
- virtual_network: testVnet
- subnet: testSubnet
-
-- name: Create VM
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "vmforimage{{ rpfx }}"
- admin_username: testuser
- admin_password: "Password1234!"
- vm_size: Standard_B1ms
- network_interfaces: "vmforimage{{ rpfx }}nic"
- image:
- offer: UbuntuServer
- publisher: Canonical
- sku: 16.04-LTS
- version: latest
-- name: Generalize VM
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "vmforimage{{ rpfx }}"
- generalized: yes
-- name: Create image A
- azure_rm_image:
- resource_group: "{{ resource_group }}"
- name: testimagea
- source: "vmforimage{{ rpfx }}"
-- name: Create image B
- azure_rm_image:
- resource_group: "{{ resource_group }}"
- name: testimageb
- source: "vmforimage{{ rpfx }}"
-- name: Delete VM
- azure_rm_virtualmachine:
- resource_group: "{{ resource_group }}"
- name: "vmforimage{{ rpfx }}"
- state: absent
-
-- name: Create VMSS (check mode)
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ resource_group }}"
- name: testVMSS{{ rpfx }}
- vm_size: Standard_B1s
- admin_username: testuser
- ssh_password_enabled: true
- admin_password: "Password1234!"
- capacity: 1
- virtual_network_name: testVnet
- subnet_name: testSubnet
- load_balancer: testLB
- upgrade_policy: Manual
- tier: Standard
- managed_disk_type: Standard_LRS
- os_disk_caching: ReadWrite
- image:
- offer: CoreOS
- publisher: CoreOS
- sku: Stable
- version: latest
- data_disks:
- - lun: 0
- disk_size_gb: 64
- caching: ReadWrite
- managed_disk_type: Standard_LRS
- register: results
- check_mode: yes
-
-- name: Assert that VMSS can be created
- assert:
- that: results.changed
-
-- name: Get VMSS to assert no VMSS is created in check mode
- azure_rm_virtualmachinescaleset_facts:
- resource_group: "{{ resource_group }}"
- name: testVMSS{{ rpfx }}
- format: curated
- register: output_scaleset
-
-- name: Assert no VMSS created in check mode
- assert:
- that:
- - output_scaleset.ansible_facts.azure_vmss | length == 0
-
-- name: Create VMSS
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ resource_group }}"
- name: testVMSS{{ rpfx }}
- vm_size: Standard_B1s
- admin_username: testuser
- ssh_password_enabled: true
- admin_password: "Password1234!"
- capacity: 1
- virtual_network_name: testVnet
- subnet_name: testSubnet
- upgrade_policy: Manual
- load_balancer: testLB
- tier: Standard
- managed_disk_type: Standard_LRS
- os_disk_caching: ReadWrite
- custom_data: "#cloud-config"
- image:
- offer: CoreOS
- publisher: CoreOS
- sku: Stable
- version: latest
- data_disks:
- - lun: 0
- disk_size_gb: 64
- caching: ReadWrite
- managed_disk_type: Standard_LRS
- scale_in_policy: "NewestVM"
- register: results
-
-- name: Assert that VMSS was created
- assert:
- that: results.changed
-
-- name: Update VMSS -- change upgrade_policy and load balancer
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ resource_group }}"
- name: testVMSS{{ rpfx }}
- vm_size: Standard_B1s
- admin_username: testuser
- ssh_password_enabled: true
- admin_password: "Password1234!"
- capacity: 1
- virtual_network_name: testVnet
- subnet_name: testSubnet
- upgrade_policy: Automatic
- load_balancer: testLB1
- tier: Standard
- managed_disk_type: Standard_LRS
- os_disk_caching: ReadWrite
- custom_data: "#cloud-config"
- image:
- offer: CoreOS
- publisher: CoreOS
- sku: Stable
- version: latest
- data_disks:
- - lun: 0
- disk_size_gb: 64
- caching: ReadWrite
- managed_disk_type: Standard_LRS
- register: results
-
-- name: Assert that VMSS was updated
- assert:
- that: results.changed
-
-- name: Retrieve scaleset facts
- azure_rm_virtualmachinescaleset_facts:
- resource_group: "{{ resource_group }}"
- name: testVMSS{{ rpfx }}
- format: curated
- register: output_scaleset
-
-- assert:
- that:
- - output_scaleset.vmss[0].load_balancer == "testLB1"
-
-- name: Retrieve scaleset VMs facts
- azure_rm_virtualmachinescalesetinstance_facts:
- resource_group: "{{ resource_group }}"
- vmss_name: testVMSS{{ rpfx }}
- register: instances
-
-- name: Assert that facts returned correctly
- assert:
- that:
- - instances.instances | length == 1
- - instances.instances[0].id != None
- - instances.instances[0].name != None
- - instances.instances[0].instance_id != None
- - instances.instances[0].provisioning_state != None
- - instances.instances[0].vm_id != None
- - instances.instances[0].latest_model != None
- - instances.instances[0].power_state != None
-
-- name: Get scaleset body
- set_fact:
- body: "{{ output_scaleset.vmss[0] }}"
-
-- name: Try to update VMSS using output as input
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ body.resource_group }}"
- name: "{{ body.name }}"
- vm_size: "{{ body.vm_size }}"
- admin_username: "{{ body.admin_username }}"
- ssh_password_enabled: "{{ body.ssh_password_enabled }}"
- admin_password: "Password1234!"
- capacity: "{{ body.capacity }}"
- virtual_network_name: "{{ body.virtual_network_name }}"
- subnet_name: "{{ body.subnet_name }}"
- upgrade_policy: "{{ body.upgrade_policy }}"
- load_balancer: "{{ body.load_balancer }}"
- tier: "{{ body.tier }}"
- managed_disk_type: "{{ body.managed_disk_type }}"
- os_disk_caching: "{{ body.os_disk_caching }}"
- image: "{{ body.image }}"
- data_disks: "{{ body.data_disks }}"
- overprovision: "{{ body.overprovision }}"
- register: results
-
-- name: Assert that nothing was changed
- assert:
- that: not results.changed
-
-- name: Install VMSS Extension
- azure_rm_virtualmachinescalesetextension:
- resource_group: "{{ resource_group }}"
- vmss_name: testVMSS{{ rpfx }}
- name: testExtension
- publisher: Microsoft.Azure.Extensions
- type: CustomScript
- type_handler_version: 2.0
- auto_upgrade_minor_version: true
- settings: {"commandToExecute": "sudo apt-get -y install apache2"}
- register: results
-
-- name: Assert that something was changed
- assert:
- that: results.changed
-
-- name: Install VMSS Extension again (idempotent)
- azure_rm_virtualmachinescalesetextension:
- resource_group: "{{ resource_group }}"
- vmss_name: testVMSS{{ rpfx }}
- name: testExtension
- publisher: Microsoft.Azure.Extensions
- type: CustomScript
- type_handler_version: 2.0
- auto_upgrade_minor_version: true
- settings: {"commandToExecute": "sudo apt-get -y install apache2"}
- register: results
-
-- name: Assert that nothing was changed
- assert:
- that: not results.changed
-
-- name: Query extension
- azure_rm_virtualmachinescalesetextension_facts:
- resource_group: "{{ resource_group }}"
- vmss_name: testVMSS{{ rpfx }}
- name: testExtension
- register: results
-- name: Assert that facts are returned
- assert:
- that:
- - results.changed == False
- - results.extensions[0]['id'] != None
- - results.extensions[0]['resource_group'] != None
- - results.extensions[0]['vmss_name'] != None
- - results.extensions[0]['name'] != None
- - results.extensions[0]['publisher'] != None
- - results.extensions[0]['type'] != None
- - results.extensions[0]['settings'] != None
- - results.extensions[0]['auto_upgrade_minor_version'] != None
- - results.extensions[0]['provisioning_state'] != None
-
-- name: List extensions
- azure_rm_virtualmachinescalesetextension_facts:
- resource_group: "{{ resource_group }}"
- vmss_name: testVMSS{{ rpfx }}
- register: results
-- name: Assert that facts are returned
- assert:
- that:
- - results.changed == False
- - results.extensions[0]['id'] != None
- - results.extensions[0]['resource_group'] != None
- - results.extensions[0]['vmss_name'] != None
- - results.extensions[0]['name'] != None
- - results.extensions[0]['publisher'] != None
- - results.extensions[0]['type'] != None
- - results.extensions[0]['settings'] != None
- - results.extensions[0]['auto_upgrade_minor_version'] != None
- - results.extensions[0]['provisioning_state'] != None
-
-
-- name: Delete VMSS Extension
- azure_rm_virtualmachinescalesetextension:
- resource_group: "{{ resource_group }}"
- vmss_name: testVMSS{{ rpfx }}
- name: testExtension
- state: absent
- register: results
-
-- name: Assert that change was reported
- assert:
- that: results.changed
-
-- name: Upgrade instance to the latest image
- azure_rm_virtualmachinescalesetinstance:
- resource_group: "{{ resource_group }}"
- vmss_name: testVMSS{{ rpfx }}
- instance_id: "{{ instances.instances[0].instance_id }}"
- latest_model: yes
- register: results
-
-- name: Assert that something has changed
- assert:
- that: results.changed
-
-- name: Stop virtual machine
- azure_rm_virtualmachinescalesetinstance:
- resource_group: "{{ resource_group }}"
- vmss_name: testVMSS{{ rpfx }}
- instance_id: "{{ instances.instances[0].instance_id }}"
- power_state: stopped
- register: results
-
-- name: Assert that something has changed
- assert:
- that: results.changed
-
-- name: Delete instance
- azure_rm_virtualmachinescalesetinstance:
- resource_group: "{{ resource_group }}"
- vmss_name: testVMSS{{ rpfx }}
- instance_id: "{{ instances.instances[0].instance_id }}"
- state: absent
- register: results
-
-- name: Assert that something has changed
- assert:
- that: results.changed
-
-
-- name: Delete VMSS
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ resource_group }}"
- name: testVMSS{{ rpfx }}
- state: absent
-
-- name: Create VMSS with security group in the same resource group, with accelerated networking (check mode).
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ resource_group }}"
- name: testVMSS{{ rpfx }}2
- vm_size: Standard_D3_v2
- capacity: 0
- virtual_network_name: testVnet
- subnet_name: testSubnet
- admin_username: testuser
- ssh_password_enabled: true
- admin_password: "Password1234!"
- image:
- name: testimagea
- resource_group: "{{ resource_group }}"
- upgrade_policy: Manual
- security_group: testNetworkSecurityGroup
- enable_accelerated_networking: yes
- register: results
- check_mode: yes
-
-- name: Assert that VMSS can be created
- assert:
- that: results.changed
-
-- name: Create VMSS with security group in the same resource group, with accelerated networking.
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ resource_group }}"
- name: testVMSS{{ rpfx }}2
- vm_size: Standard_D3_v2
- capacity: 0
- virtual_network_name: testVnet
- subnet_name: testSubnet
- admin_username: testuser
- ssh_password_enabled: true
- admin_password: "Password1234!"
- image:
- name: testimagea
- resource_group: "{{ resource_group }}"
- upgrade_policy: Manual
- security_group: testNetworkSecurityGroup
- enable_accelerated_networking: yes
- register: results
-
-- name: Assert that VMSS ran
- assert:
- that:
- - 'results.changed'
- - 'results.ansible_facts.azure_vmss.properties.virtualMachineProfile.networkProfile.networkInterfaceConfigurations.0.properties.enableAcceleratedNetworking == true'
- - 'results.ansible_facts.azure_vmss.properties.virtualMachineProfile.networkProfile.networkInterfaceConfigurations.0.properties.networkSecurityGroup != {}'
-
-- name: Create VMSS again with the same parameters (idempotent).
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ resource_group }}"
- name: testVMSS{{ rpfx }}2
- vm_size: Standard_D3_v2
- capacity: 0
- virtual_network_name: testVnet
- subnet_name: testSubnet
- admin_username: testuser
- ssh_password_enabled: true
- admin_password: "Password1234!"
- image:
- name: testimagea
- resource_group: "{{ resource_group }}"
- upgrade_policy: Manual
- security_group: testNetworkSecurityGroup
- enable_accelerated_networking: yes
- register: results
-
-- name: Assert that nothing has changed
- assert:
- that:
- - not results.changed
-
-- name: Update VMSS to use image testimageb.
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ resource_group }}"
- name: testVMSS{{ rpfx }}2
- vm_size: Standard_D3_v2
- capacity: 0
- virtual_network_name: testVnet
- subnet_name: testSubnet
- admin_username: testuser
- ssh_password_enabled: true
- admin_password: "Password1234!"
- image:
- name: testimageb
- resource_group: "{{ resource_group }}"
- upgrade_policy: Manual
- security_group: testNetworkSecurityGroup
- enable_accelerated_networking: yes
- register: results
-
-- name: Assert that something has changed
- assert:
- that:
- - results.changed
-
-- name: Update VMSS with security group in a different resource group.
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ resource_group }}"
- name: testVMSS{{ rpfx }}2
- vm_size: Standard_B1s
- capacity: 0
- virtual_network_name: testVnet
- subnet_name: testSubnet
- admin_username: testuser
- ssh_password_enabled: true
- admin_password: "Password1234!"
- image:
- name: testimageb
- resource_group: "{{ resource_group }}"
- upgrade_policy: Manual
- security_group:
- name: testNetworkSecurityGroup2
- resource_group: "{{ resource_group_secondary }}"
- register: results
-
-# disable for now
-#- name: Assert that security group is correct
-# assert:
-# that:
-# - 'results.changed'
-# - '"testNetworkSecurityGroup2" in results.ansible_facts.azure_vmss.properties.virtualMachineProfile.networkProfile.networkInterfaceConfigurations.0.properties.networkSecurityGroup.id'
-
-- name: Delete VMSS
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ resource_group }}"
- name: testVMSS{{ rpfx }}2
- state: absent
-
-- name: Fail when the instance type does not support accelerated networking
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ resource_group }}"
- name: testVMSS{{ rpfx }}4
- vm_size: Standard_B1s
- virtual_network_name: testVnet
- subnet_name: testSubnet
- admin_username: testuser
- ssh_password_enabled: true
- admin_password: "Password1234!"
- image:
- offer: CoreOS
- publisher: CoreOS
- sku: Stable
- version: latest
- upgrade_policy: Manual
- enable_accelerated_networking: yes
- register: results
- ignore_errors: yes
-
-- name: Assert failure, showing that accelerated networking can be enabled only on supported instance types
- assert:
- that:
- - '"VMSizeIsNotPermittedToEnableAcceleratedNetworkingForVmss" in results.msg'
-
-- name: Delete network security group
- azure_rm_securitygroup:
- resource_group: "{{ resource_group }}"
- name: testNetworkSecurityGroup
- state: absent
-
-- name: Delete network security group
- azure_rm_securitygroup:
- resource_group: "{{ resource_group_secondary }}"
- name: testNetworkSecurityGroup2
- state: absent
-
-- name: Delete load balancer
- azure_rm_loadbalancer:
- resource_group: "{{ resource_group }}"
- name: testLB
- state: absent
-
-- name: Delete public IP address
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- state: absent
- name: testPublicIP
-
-- name: Delete virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: testVnet
- state: absent
- address_prefixes: "10.0.0.0/16"
-
-# TODO: Until we have a module to create/delete images, these are the best tests
-# I can do
-- name: assert error thrown with invalid image dict
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ resource_group }}"
- name: testvm002
- vm_size: Standard_B1s
- image:
- offer: UbuntuServer
- register: fail_invalid_image_dict
- failed_when: 'fail_invalid_image_dict.msg != "parameter error: expecting image to contain [publisher, offer, sku, version], [name, resource_group] or [id]"'
-
-- name: assert error thrown with invalid image type
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ resource_group }}"
- name: testvm002
- vm_size: Standard_B1s
- image:
- - testing
- register: fail_invalid_image_type
- failed_when: 'fail_invalid_image_type.msg != "parameter error: expecting image to be a string or dict not list"'
-
-- name: assert error finding missing custom image
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ resource_group }}"
- name: testvm002
- vm_size: Standard_B1s
- image: invalid-image
- register: fail_missing_custom_image
- failed_when: fail_missing_custom_image.msg != "Error could not find image with name invalid-image"
-
-- name: assert error finding missing custom image (dict style)
- azure_rm_virtualmachinescaleset:
- resource_group: "{{ resource_group }}"
- name: testvm002
- vm_size: Standard_B1s
- image:
- name: invalid-image
- register: fail_missing_custom_image_dict
- failed_when: fail_missing_custom_image_dict.msg != "Error could not find image with name invalid-image"
diff --git a/test/integration/targets/azure_rm_virtualnetwork/aliases b/test/integration/targets/azure_rm_virtualnetwork/aliases
deleted file mode 100644
index aa77c071a8..0000000000
--- a/test/integration/targets/azure_rm_virtualnetwork/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/azure
-shippable/azure/group2
-destructive
diff --git a/test/integration/targets/azure_rm_virtualnetwork/meta/main.yml b/test/integration/targets/azure_rm_virtualnetwork/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_virtualnetwork/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_virtualnetwork/tasks/main.yml b/test/integration/targets/azure_rm_virtualnetwork/tasks/main.yml
deleted file mode 100644
index ff402462d2..0000000000
--- a/test/integration/targets/azure_rm_virtualnetwork/tasks/main.yml
+++ /dev/null
@@ -1,181 +0,0 @@
-- name: Prepare random number
- set_fact:
- vnetname: "vnet{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
-
-- name: Delete virtual network, if it exists
- azure_rm_virtualnetwork:
- name: "{{ vnetname }}"
- resource_group: "{{ resource_group }}"
- state: absent
-
-- name: Create virtual network
- azure_rm_virtualnetwork:
- name: "{{ vnetname }}"
- address_prefixes_cidr:
- - 10.1.0.0/16
- - 172.100.0.0/16
- tags:
- testing: testing
- delete: on-exit
- resource_group: "{{ resource_group }}"
-
-- name: Create virtual network with DNS servers
- azure_rm_virtualnetwork:
- name: "{{ vnetname }}"
- address_prefixes_cidr:
- - 10.1.0.0/16
- - 172.100.0.0/16
- dns_servers:
- - 127.0.0.1
- - 127.0.0.3
- tags:
- testing: testing
- delete: on-exit
- resource_group: "{{ resource_group }}"
- register: output
-
-- assert:
- that:
- - "output.state.address_prefixes | length == 2"
- - "output.state.dns_servers | length == 2"
- - "output.state.tags.delete == 'on-exit'"
- - "output.state.tags | length == 2"
-
-- name: Attach a subnet
- azure_rm_subnet:
- resource_group: "{{ resource_group }}"
- name: "{{ vnetname }}"
- virtual_network_name: "{{ vnetname }}"
- address_prefix_cidr: "10.1.0.0/24"
-
-- name: Gather facts by name, tags
- azure_rm_virtualnetwork_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ vnetname }}"
- tags:
- - testing
- register: facts
-
-- assert:
- that:
- - "azure_virtualnetworks | length == 1"
- - "facts.virtualnetworks | length == 1"
- - "facts.virtualnetworks[0].dns_servers | length == 2"
- - "facts.virtualnetworks[0].address_prefixes | length == 2"
- - "facts.virtualnetworks[0].subnets | length == 1"
-
-- name: Gather facts by resource group, tags
- azure_rm_virtualnetwork_facts:
- resource_group: "{{ resource_group }}"
- tags:
- - testing
-
-- assert:
- that: "azure_virtualnetworks | length >= 1"
-
-- name: Gather facts by tags
- azure_rm_virtualnetwork_facts:
- tags:
- - testing
-
-- assert:
- that: "azure_virtualnetworks | length >= 1"
-
-- name: Should be idempotent
- azure_rm_virtualnetwork:
- name: "{{ vnetname }}"
- address_prefixes_cidr:
- - 10.1.0.0/16
- - 172.100.0.0/16
- dns_servers:
- - 127.0.0.1
- - 127.0.0.3
- tags:
- testing: testing
- delete: on-exit
- resource_group: "{{ resource_group }}"
- register: output
-
-- assert:
- that: not output.changed
-
-- name: Update tags
- azure_rm_virtualnetwork:
- name: "{{ vnetname }}"
- tags:
- testing: 'no'
- delete: never
- foo: bar
- resource_group: "{{ resource_group }}"
- register: output
-
-- assert:
- that: output.state.tags | length == 3
-
-- name: Purge tags
- azure_rm_virtualnetwork:
- name: "{{ vnetname }}"
- append_tags: no
- tags:
- testing: 'always'
- resource_group: "{{ resource_group }}"
- register: output
-
-- assert:
- that:
- - output.state.tags | length == 1
- - output.state.tags.testing == 'always'
-
-- name: Should require address_prefixes_cidr when purge_address_prefixes is set
- azure_rm_virtualnetwork:
- name: "{{ vnetname }}"
- purge_address_prefixes: true
- resource_group: "{{ resource_group }}"
- register: output
- ignore_errors: yes
-
-- assert:
- that: output.failed
-
-- name: Purge address prefixes
- azure_rm_virtualnetwork:
- name: "{{ vnetname }}"
- address_prefixes_cidr: 10.1.0.0/16
- purge_address_prefixes: true
- resource_group: "{{ resource_group }}"
- register: output
-
-- assert:
- that:
- - output.state.address_prefixes | length == 1
- - output.state.address_prefixes[0] == '10.1.0.0/16'
- - output.state.dns_servers | length == 2
- - output.state.dns_servers[0] == '127.0.0.1'
-
-- name: Purge DNS servers
- azure_rm_virtualnetwork:
- name: "{{ vnetname }}"
- purge_dns_servers: true
- resource_group: "{{ resource_group }}"
- register: output
-
-- assert:
- that: output.state['dns_servers'] is undefined
-
-- name: Gather facts
- azure_rm_virtualnetwork_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ vnetname }}"
- register: facts
-
-- assert:
- that:
- - azure_virtualnetworks | length == 1
- - facts.virtualnetworks | length == 1
- - "facts.virtualnetworks[0].subnets | length == 1"
-
-- name: Delete virtual network
- azure_rm_virtualnetwork:
- name: "{{ vnetname }}"
- resource_group: "{{ resource_group }}"
- state: absent
diff --git a/test/integration/targets/azure_rm_virtualnetworkgateway/aliases b/test/integration/targets/azure_rm_virtualnetworkgateway/aliases
deleted file mode 100644
index aa77c071a8..0000000000
--- a/test/integration/targets/azure_rm_virtualnetworkgateway/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/azure
-shippable/azure/group2
-destructive
diff --git a/test/integration/targets/azure_rm_virtualnetworkgateway/meta/main.yml b/test/integration/targets/azure_rm_virtualnetworkgateway/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_virtualnetworkgateway/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_virtualnetworkgateway/tasks/main.yml b/test/integration/targets/azure_rm_virtualnetworkgateway/tasks/main.yml
deleted file mode 100644
index 146d3769a5..0000000000
--- a/test/integration/targets/azure_rm_virtualnetworkgateway/tasks/main.yml
+++ /dev/null
@@ -1,126 +0,0 @@
-# FIXME: needs minimal tests (check mode?) that can run quickly; VNG creation takes > 20 min
-
-- name: Prepare random number
- set_fact:
- vnetname: "vnet{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- vngname: "vng{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
-
-- name: Create virtual network gateway without bgp settings (check mode)
- check_mode: yes
- azure_rm_virtualnetworkgateway:
- resource_group: "{{ resource_group }}"
- name: "{{ vngname }}"
- ip_configurations:
- - name: testipconfig
- private_ip_allocation_method: Dynamic
- public_ip_address_name: testPublicIP
- virtual_network: "{{ vnetname }}"
- tags:
- common: "xyz"
- register: output
-
-- assert:
- that: output.changed
-
-- name: long-running virtualnetworkgateway tests [run with `--tags long_run,untagged` to enable]
- tags: [long_run, never]
- block:
- - name: Create virtual network
- azure_rm_virtualnetwork:
- resource_group: "{{ resource_group }}"
- name: "{{ vnetname }}"
- address_prefixes: "10.0.0.0/16"
-
- - name: Add subnet
- azure_rm_subnet:
- resource_group: "{{ resource_group }}"
- name: GatewaySubnet
- address_prefix: "10.0.2.0/24"
- virtual_network: "{{ vnetname }}"
-
- - name: Create public IP address
- azure_rm_publicipaddress:
- resource_group: "{{ resource_group }}"
- allocation_method: Dynamic
- name: testPublicIP
-
- - name: Create virtual network gateway without bgp settings
- azure_rm_virtualnetworkgateway:
- resource_group: "{{ resource_group }}"
- name: "{{ vngname }}"
- ip_configurations:
- - name: testipconfig
- private_ip_allocation_method: Dynamic
- public_ip_address_name: testPublicIP
- virtual_network: "{{ vnetname }}"
- tags:
- common: "xyz"
- register: output
-
- - assert:
- that: output.changed
-
- - name: Create virtual network gateway without bgp settings
- azure_rm_virtualnetworkgateway:
- resource_group: "{{ resource_group }}"
- name: "{{ vngname }}"
- ip_configurations:
- - name: testipconfig
- private_ip_allocation_method: Dynamic
- public_ip_address_name: testPublicIP
- virtual_network: "{{ vnetname }}"
- tags:
- common: "xyz"
- register: output
-
- - assert:
- that: output.changed
-
- - name: Create virtual network gateway without bgp settings - idempotent
- azure_rm_virtualnetworkgateway:
- resource_group: "{{ resource_group }}"
- name: "{{ vngname }}"
- ip_configurations:
- - name: testipconfig
- private_ip_allocation_method: Dynamic
- public_ip_address_name: testPublicIP
- virtual_network: "{{ vnetname }}"
- tags:
- common: "xyz"
- register: output
-
- - assert:
- that: not output.changed
-
- - name: Update virtual network gateway
- azure_rm_virtualnetworkgateway:
- resource_group: "{{ resource_group }}"
- name: "{{ vngname }}"
- ip_configurations:
- - name: testipconfig
- private_ip_allocation_method: Dynamic
- public_ip_address_name: testPublicIP
- virtual_network: "{{ vnetname }}"
- tags:
- common: "mno"
- register: output
- - assert:
- that: output.changed
-
- - name: Delete virtual network gateway
- azure_rm_virtualnetworkgateway:
- resource_group: "{{ resource_group }}"
- name: "{{ vngname }}"
- state: absent
- register: output
- - assert:
- that: output.changed
-
-- name: Delete virtual network gateway - idempotent
- azure_rm_virtualnetworkgateway:
- resource_group: "{{ resource_group }}"
- name: "{{ vngname }}"
- state: absent
- register: output
-- assert:
- that: not output.changed
diff --git a/test/integration/targets/azure_rm_virtualnetworkpeering/aliases b/test/integration/targets/azure_rm_virtualnetworkpeering/aliases
deleted file mode 100644
index aa77c071a8..0000000000
--- a/test/integration/targets/azure_rm_virtualnetworkpeering/aliases
+++ /dev/null
@@ -1,3 +0,0 @@
-cloud/azure
-shippable/azure/group2
-destructive
diff --git a/test/integration/targets/azure_rm_virtualnetworkpeering/meta/main.yml b/test/integration/targets/azure_rm_virtualnetworkpeering/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_virtualnetworkpeering/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_virtualnetworkpeering/tasks/main.yml b/test/integration/targets/azure_rm_virtualnetworkpeering/tasks/main.yml
deleted file mode 100644
index 9eb0424af3..0000000000
--- a/test/integration/targets/azure_rm_virtualnetworkpeering/tasks/main.yml
+++ /dev/null
@@ -1,126 +0,0 @@
-- name: Prepare random number
- set_fact:
- vnetname1: "vnet1{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- vnetname2: "vnet2{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 1000 | random }}"
- peering_name: "peering1{{ resource_group | hash('md5') | truncate(7, True, '') }}{{ 100 | random }}"
-
-- name: Create first virtual network
- azure_rm_virtualnetwork:
- name: "{{ vnetname1 }}"
- address_prefixes_cidr:
- - 10.1.0.0/16
- tags:
- testing: testing
- delete: on-exit
- resource_group: "{{ resource_group }}"
- register: vnet1
-
-- name: Create second virtual network
- azure_rm_virtualnetwork:
- name: "{{ vnetname2 }}"
- address_prefixes_cidr:
- - 10.2.0.0/24
- resource_group: "{{ resource_group_secondary }}"
- register: vnet2
-
-- assert:
- that:
- - vnet1.changed
- - vnet2.changed
-
-- name: Create virtual network peering (check mode)
- azure_rm_virtualnetworkpeering:
- resource_group: "{{ resource_group }}"
- name: "{{ peering_name }}"
- virtual_network: "{{ vnetname1 }}"
- remote_virtual_network:
- resource_group: "{{ resource_group_secondary }}"
- name: "{{ vnetname2 }}"
- allow_virtual_network_access: false
- allow_forwarded_traffic: true
- check_mode: yes
- register: output
-
-- assert:
- that: output.changed
-
-- name: Create virtual network peering
- azure_rm_virtualnetworkpeering:
- resource_group: "{{ resource_group }}"
- name: "{{ peering_name }}"
- virtual_network: "{{ vnetname1 }}"
- remote_virtual_network:
- resource_group: "{{ resource_group_secondary }}"
- name: "{{ vnetname2 }}"
- allow_virtual_network_access: false
- allow_forwarded_traffic: true
- register: output
-
-- assert:
- that: output.changed
-
-- name: Update virtual network peering (idempotent)
- azure_rm_virtualnetworkpeering:
- resource_group: "{{ resource_group }}"
- name: "{{ peering_name }}"
- virtual_network: "{{ vnetname1 }}"
- remote_virtual_network:
- resource_group: "{{ resource_group_secondary }}"
- name: "{{ vnetname2 }}"
- allow_virtual_network_access: false
- allow_forwarded_traffic: true
- register: output
-
-- assert:
- that: not output.changed
-
-- name: Update virtual network peering
- azure_rm_virtualnetworkpeering:
- resource_group: "{{ resource_group }}"
- name: "{{ peering_name }}"
- virtual_network: "{{ vnetname1 }}"
- remote_virtual_network:
- resource_group: "{{ resource_group_secondary }}"
- name: "{{ vnetname2 }}"
- allow_virtual_network_access: true
- allow_forwarded_traffic: false
- register: output
-
-- assert:
- that: output.changed
-
-- name: Get facts
- azure_rm_virtualnetworkpeering_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ peering_name }}"
- virtual_network: "{{ vnetname1 }}"
- register: facts
-
-- name: Assert Facts
- assert:
- that:
- - facts['vnetpeerings'] | length == 1
- - facts['vnetpeerings'][0]['id']
- - facts['vnetpeerings'][0]['peering_state']
- - facts['vnetpeerings'][0]['remote_virtual_network']
- - facts['vnetpeerings'][0]['provisioning_state']
-
-- name: Delete virtual network peering
- azure_rm_virtualnetworkpeering:
- resource_group: "{{ resource_group }}"
- name: "{{ peering_name }}"
- virtual_network: "{{ vnetname1 }}"
- state: absent
- register: output
-
-- name: Delete first virtual network
- azure_rm_virtualnetwork:
- name: "{{ vnetname1 }}"
- resource_group: "{{ resource_group }}"
- state: absent
-
-- name: Delete second virtual network
- azure_rm_virtualnetwork:
- name: "{{ vnetname2 }}"
- resource_group: "{{ resource_group_secondary }}"
- state: absent \ No newline at end of file
diff --git a/test/integration/targets/azure_rm_webapp/aliases b/test/integration/targets/azure_rm_webapp/aliases
deleted file mode 100644
index c7c2aff0c0..0000000000
--- a/test/integration/targets/azure_rm_webapp/aliases
+++ /dev/null
@@ -1,5 +0,0 @@
-cloud/azure
-shippable/azure/group3
-destructive
-azure_rm_webapp_facts
-azure_rm_webappslot
diff --git a/test/integration/targets/azure_rm_webapp/meta/main.yml b/test/integration/targets/azure_rm_webapp/meta/main.yml
deleted file mode 100644
index 95e1952f98..0000000000
--- a/test/integration/targets/azure_rm_webapp/meta/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
-dependencies:
- - setup_azure
diff --git a/test/integration/targets/azure_rm_webapp/tasks/main.yml b/test/integration/targets/azure_rm_webapp/tasks/main.yml
deleted file mode 100644
index 8efc6629de..0000000000
--- a/test/integration/targets/azure_rm_webapp/tasks/main.yml
+++ /dev/null
@@ -1,434 +0,0 @@
-- name: Fix resource prefix
- set_fact:
- linux_app_plan_resource_group: "{{ resource_group_secondary }}"
- win_app_name: "{{ (resource_prefix | replace('-','x'))[-8:] }}{{ 1000 | random}}winapp"
- win_plan_name: "{{ (resource_prefix | replace('-','x'))[-8:] }}winplan"
- linux_plan_name: "{{ (resource_group_secondary | replace('-','x'))[-8:] }}linplan"
- slot1_name: "stage1"
-
-- name: Create a windows web app with a non-existent app service plan
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}1"
- plan:
- resource_group: "{{ resource_group }}"
- name: "{{ win_plan_name }}"
- is_linux: false
- sku: S1
-
-- name: Create a windows web app with an existing app service plan
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}2"
- plan: "{{ win_plan_name }}"
- register: output
-
-- name: stop the web app
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}2"
- plan: "{{ win_plan_name }}"
- app_state: stopped
- register: output
-
-- name: assert output changed
- assert:
- that:
- output.changed
-
-# enable after webapp_facts is merged
-# - name: get the web app
-# azure_rm_webapp_facts:
-# resource_group: "{{ resource_group }}"
-# name: "{{ win_app_name }}2"
-# register: stopped
-
-# - name: assert web app is stopped
-# assert:
-# that:
-# - stopped.properties.state == "Stopped"
-
-- name: Create a windows web app with an existing app service plan and update some root-level params
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}3"
- plan: "{{ win_plan_name }}"
- dns_registration: true
- https_only: true
- tags:
- testwebapptag: test
- register: output
-
-- name: get web app with resource group and tag
- azure_rm_webapp_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}3"
- tags:
- - testwebapptag
- register: output
-
-- assert:
- that:
- - output.webapps | length == 1
-
-- name: Create a win web app with a specific Java runtime
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}4"
- plan: "{{ win_plan_name }}"
- frameworks:
- - name: "java"
- version: "1.8"
- settings:
- java_container: "Tomcat"
- java_container_version: "8.0"
- app_settings:
- testkey: "testvalue"
- register: output
-
-- name: assert the web app was created
- assert:
- that: output.changed
-
-- name: get web app with name
- azure_rm_webapp_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}4"
- register: output
-
-- assert:
- that:
- - output.webapps | length == 1
- - output.webapps[0].app_settings | length == 1
- output.webapps[0].frameworks | length > 1 # there are default frameworks, e.g. net_framework
-
-- name: Update app settings and framework
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}4"
- plan: "{{ win_plan_name }}"
- frameworks:
- - name: "java"
- version: "1.7"
- settings:
- java_container: "Tomcat"
- java_container_version: "8.5"
- app_settings:
- testkey2: "testvalue2"
- register: output
-
-- name: Assert the web app was updated
- assert:
- that:
- - output.changed
-
-- name: get web app with name
- azure_rm_webapp_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}4"
- register: output
-
-- name: Assert updating
- assert:
- that:
- - output.webapps[0].app_settings | length == 2
- - output.webapps[0].app_settings['testkey'] == 'testvalue'
- - output.webapps[0].app_settings['testkey2'] == 'testvalue2'
-
-- name: get web app, returning the publish profile
- azure_rm_webapp_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}4"
- return_publish_profile: true
- register: output
-
-- assert:
- that:
- - output.webapps | length == 1
- - output.webapps[0].publishing_username != ""
- - output.webapps[0].publishing_password != ""
-
-- name: Purge all existing app settings
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}4"
- plan: "{{ win_plan_name }}"
- purge_app_settings: true
- register: output
-
-- name: Assert the web app was updated
- assert:
- that: output.changed
-
-- name: Create a win web app with Python, Node, and PHP runtimes
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}5"
- plan: "{{ win_plan_name }}"
- frameworks:
- - name: "python"
- version: "2.7"
- - name: node
- version: "6.6"
- - name: "php"
- version: "7.0"
- register: output
-
-- name: Assert the web app was created
- assert:
- that: output.changed
-
-- name: Create a docker web app with some app settings
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}6"
- plan:
- resource_group: "{{ linux_app_plan_resource_group }}"
- name: "{{ linux_plan_name }}"
- is_linux: true
- sku: S1
- number_of_workers: 1
- container_settings:
- name: "ansible/ansible:ubuntu1404"
- register: output
-
-- name: Assert the web app was created
- assert:
- that: output.changed
-
-- name: Create a docker web app with a private ACR registry
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}7"
- plan:
- resource_group: "{{ linux_app_plan_resource_group }}"
- name: "{{ linux_plan_name }}"
- container_settings:
- name: "ansible/ansible:ubuntu1404"
- registry_server_url: test.io
- registry_server_user: user
- registry_server_password: password
- register: output
-
-- name: Assert the web app was created
- assert:
- that: output.changed
-
-- name: Create a linux web app with nodejs framework
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}8"
- plan:
- resource_group: "{{ linux_app_plan_resource_group }}"
- name: "{{ linux_plan_name }}"
- frameworks:
- - name: node
- version: "6.6"
- register: output
-
-- name: Should be idempotent once the linux web app is created
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}8"
- plan:
- resource_group: "{{ linux_app_plan_resource_group }}"
- name: "{{ linux_plan_name }}"
- frameworks:
- - name: node
- version: "6.6"
- register: output
-
-- assert:
- that: not output.changed
-
-- name: Update nodejs framework
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}8"
- plan:
- resource_group: "{{ linux_app_plan_resource_group }}"
- name: "{{ linux_plan_name }}"
- frameworks:
- - name: node
- version: "6.9"
- register: output
-
-- name: Assert the web app was updated
- assert:
- that: output.changed
-
-- name: Create a linux web app with GitHub as the deployment source
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}10"
- plan:
- resource_group: "{{ linux_app_plan_resource_group }}"
- name: "{{ linux_plan_name }}"
- deployment_source:
- url: "https://github.com/test/test"
- branch: master
- scm_type: GitHub
- register: output
-
-- name: Assert the web app was created
- assert:
- that: output.changed
-
-- name: Delete web app
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}8"
- state: absent
- register: output
-
-- name: Assert the web app was deleted
- assert:
- that: output.changed
-
-- name: assert error that java is mutually exclusive with other frameworks
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}11"
- plan: "{{ win_plan_name }}"
- frameworks:
- - name: "python"
- version: "2.7"
- - name: "java"
- version: "1.8"
- register: fail_win_java_version_mutual_exclusive
- failed_when: 'fail_win_java_version_mutual_exclusive.msg != "Java is mutually exclusive with other frameworks."'
-
-- name: assert error that a linux web app can specify only one framework
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ linux_plan_name }}12"
- plan:
- resource_group: "{{ linux_app_plan_resource_group }}"
- name: "{{ linux_plan_name }}"
- frameworks:
- - name: "python"
- version: "2.7"
- - name: "node"
- version: "6.6"
- register: fail_linux_one_framework_only
- failed_when: fail_linux_one_framework_only.msg != "Can specify one framework only for Linux web app."
-
-- name: Create a linux web app with a Java Tomcat container
- azure_rm_webapp:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}13"
- plan:
- resource_group: "{{ linux_app_plan_resource_group }}"
- name: "{{ linux_plan_name }}"
- frameworks:
- - name: java
- version: "8"
- settings:
- java_container: "tomcat"
- java_container_version: "8.5"
- register: output
-
-- name: Assert the web app was created
- assert:
- that: output.changed
-
-- name: Get facts with publish profile
- azure_rm_webapp_facts:
- resource_group: "{{ resource_group }}"
- name: "{{ win_app_name }}13"
- no_log: true
- register: facts
-
-- name: Assert publish profile returned
- assert:
- that:
- - facts.webapps[0].ftp_publish_url != ''
-
-- name: Create a webapp slot (Check mode)
- azure_rm_webappslot:
- resource_group: "{{ resource_group }}"
- webapp_name: "{{ win_app_name }}13"
- name: "{{ slot1_name }}"
- configuration_source: "{{ win_app_name }}13"
- app_settings:
- testkey: testvalue
- check_mode: yes
- register: output
-
-- name: Assert slot check mode creation
- assert:
- that:
- - output.changed
-
-- name: Create a webapp slot
- azure_rm_webappslot:
- resource_group: "{{ resource_group }}"
- webapp_name: "{{ win_app_name }}13"
- name: "{{ slot1_name }}"
- configuration_source: "{{ win_app_name }}13"
- app_settings:
- testkey: testvalueslot
- register: output
-
-- name: Assert slot creation
- assert:
- that:
- - output.changed
-
-- name: Update webapp slot (idempotence)
- azure_rm_webappslot:
- resource_group: "{{ resource_group }}"
- webapp_name: "{{ win_app_name }}13"
- name: "{{ slot1_name }}"
- app_settings:
- testkey: testvalueslot
- register: output
-
-- name: Assert idempotence
- assert:
- that:
- - not output.changed
-
-- name: Update webapp slot
- azure_rm_webappslot:
- resource_group: "{{ resource_group }}"
- webapp_name: "{{ win_app_name }}13"
- name: "{{ slot1_name }}"
- frameworks:
- - name: "node"
- version: "10.1"
- app_settings:
- testkey: testvalue2
- register: output
-
-- name: Assert updating
- assert:
- that:
- - output.changed
-
-- name: Swap webapp slot
- azure_rm_webappslot:
- resource_group: "{{ resource_group }}"
- webapp_name: "{{ win_app_name }}13"
- name: "{{ slot1_name }}"
- swap:
- action: swap
- register: output
-
-- name: Assert swap
- assert:
- that:
- - output.changed
-
-- name: Stop webapp slot
- azure_rm_webappslot:
- resource_group: "{{ resource_group }}"
- webapp_name: "{{ win_app_name }}13"
- name: "{{ slot1_name }}"
- app_state: stopped
- register: output
-
-- name: Assert stopped
- assert:
- that:
- - output.changed \ No newline at end of file
diff --git a/test/sanity/ignore.txt b/test/sanity/ignore.txt
index 2922237204..4abc91820e 100644
--- a/test/sanity/ignore.txt
+++ b/test/sanity/ignore.txt
@@ -57,12 +57,6 @@ lib/ansible/module_utils/_text.py future-import-boilerplate
lib/ansible/module_utils/_text.py metaclass-boilerplate
lib/ansible/module_utils/api.py future-import-boilerplate
lib/ansible/module_utils/api.py metaclass-boilerplate
-lib/ansible/module_utils/azure_rm_common.py future-import-boilerplate
-lib/ansible/module_utils/azure_rm_common.py metaclass-boilerplate
-lib/ansible/module_utils/azure_rm_common_ext.py future-import-boilerplate
-lib/ansible/module_utils/azure_rm_common_ext.py metaclass-boilerplate
-lib/ansible/module_utils/azure_rm_common_rest.py future-import-boilerplate
-lib/ansible/module_utils/azure_rm_common_rest.py metaclass-boilerplate
lib/ansible/module_utils/basic.py metaclass-boilerplate
lib/ansible/module_utils/common/network.py future-import-boilerplate
lib/ansible/module_utils/common/network.py metaclass-boilerplate
@@ -115,661 +109,6 @@ lib/ansible/module_utils/urls.py pylint:blacklisted-name
lib/ansible/module_utils/urls.py replace-urlopen
lib/ansible/module_utils/yumdnf.py future-import-boilerplate
lib/ansible/module_utils/yumdnf.py metaclass-boilerplate
-lib/ansible/modules/cloud/azure/azure_rm_acs.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_acs.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_acs.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_acs.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_aks.py validate-modules:doc-choices-do-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_aks.py validate-modules:doc-default-does-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_aks.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_aks.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_aks.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_aks.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_aks.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_aks.py validate-modules:undocumented-parameter
-lib/ansible/modules/cloud/azure/azure_rm_aks_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_aks_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_aks_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_aks_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_aksversion_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_aksversion_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_aksversion_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_appgateway.py validate-modules:doc-choices-do-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_appgateway.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_appgateway.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_appgateway.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_appgateway.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_appgateway.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_applicationsecuritygroup.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_applicationsecuritygroup.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_applicationsecuritygroup.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_applicationsecuritygroup_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_applicationsecuritygroup_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_applicationsecuritygroup_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_applicationsecuritygroup_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_appserviceplan.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_appserviceplan.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_appserviceplan.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_appserviceplan_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_appserviceplan_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_appserviceplan_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_appserviceplan_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_automationaccount.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_automationaccount.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_automationaccount_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_automationaccount_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_automationaccount_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_autoscale.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_autoscale.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_autoscale.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_autoscale.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_autoscale.py validate-modules:undocumented-parameter
-lib/ansible/modules/cloud/azure/azure_rm_autoscale_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_autoscale_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_autoscale_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_autoscale_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_availabilityset.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_availabilityset.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_availabilityset.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_availabilityset_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_availabilityset_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_availabilityset_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_availabilityset_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_azurefirewall.py validate-modules:invalid-ansiblemodule-schema
-lib/ansible/modules/cloud/azure/azure_rm_azurefirewall.py validate-modules:missing-suboption-docs
-lib/ansible/modules/cloud/azure/azure_rm_azurefirewall.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_azurefirewall.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_azurefirewall.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_azurefirewall.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_azurefirewall.py validate-modules:undocumented-parameter
-lib/ansible/modules/cloud/azure/azure_rm_azurefirewall_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_azurefirewall_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_batchaccount.py validate-modules:invalid-ansiblemodule-schema
-lib/ansible/modules/cloud/azure/azure_rm_batchaccount.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_batchaccount.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_batchaccount.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_cdnendpoint_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_cdnprofile.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_cdnprofile.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_cdnprofile.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_cdnprofile_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_cdnprofile_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_cdnprofile_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_cdnprofile_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_containerinstance.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_containerinstance.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_containerinstance.py validate-modules:doc-type-does-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_containerinstance.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_containerinstance.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_containerinstance.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_containerinstance.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_containerinstance_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_containerinstance_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_containerinstance_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_containerinstance_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_containerregistry.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_containerregistry.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_containerregistry.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_containerregistry_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_containerregistry_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_containerregistry_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_containerregistry_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount.py validate-modules:nonexistent-parameter-documented
-lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount.py validate-modules:undocumented-parameter
-lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_cosmosdbaccount_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_deployment.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_deployment.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_deployment.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_deployment.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_deployment.py yamllint:unparsable-with-libyaml
-lib/ansible/modules/cloud/azure/azure_rm_deployment_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_deployment_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_deployment_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_deployment_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_devtestlab.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_devtestlab.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlab.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_devtestlab_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_devtestlab_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlab_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_devtestlab_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabarmtemplate_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabarmtemplate_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabarmtemplate_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifact_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifact_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifact_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifactsource.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifactsource.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifactsource.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifactsource_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifactsource_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifactsource_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabartifactsource_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabcustomimage.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabcustomimage.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabcustomimage.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabcustomimage_info.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabcustomimage_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabcustomimage_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabcustomimage_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabcustomimage_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabenvironment.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabenvironment.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabenvironment.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabenvironment.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabenvironment_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabenvironment_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabenvironment_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabenvironment_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabpolicy.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabpolicy.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabpolicy.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabpolicy_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabpolicy_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabpolicy_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabpolicy_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine.py validate-modules:nonexistent-parameter-documented
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine.py validate-modules:undocumented-parameter
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualmachine_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualnetwork.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualnetwork.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualnetwork.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualnetwork_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_devtestlabvirtualnetwork_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset.py validate-modules:doc-missing-type
-lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset.py validate-modules:invalid-ansiblemodule-schema
-lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_dnsrecordset_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_dnszone.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_dnszone.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_dnszone.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_dnszone.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_dnszone_info.py validate-modules:doc-type-does-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_dnszone_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_dnszone_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_dnszone_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_dnszone_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_dnszone_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_functionapp.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_functionapp.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_functionapp.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_functionapp.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_functionapp_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_functionapp_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_functionapp_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_functionapp_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_gallery.py validate-modules:invalid-ansiblemodule-schema
-lib/ansible/modules/cloud/azure/azure_rm_gallery.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_gallery.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_gallery_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_gallery_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_galleryimage.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_galleryimage.py validate-modules:invalid-ansiblemodule-schema
-lib/ansible/modules/cloud/azure/azure_rm_galleryimage.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_galleryimage.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_galleryimage.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_galleryimage_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_galleryimage_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_galleryimage_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion.py validate-modules:doc-choices-do-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion.py validate-modules:doc-type-does-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion.py validate-modules:invalid-ansiblemodule-schema
-lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_galleryimageversion_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_hdinsightcluster.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_hdinsightcluster.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_hdinsightcluster.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_hdinsightcluster.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_hdinsightcluster_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_hdinsightcluster_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_hdinsightcluster_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_hdinsightcluster_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_hdinsightcluster_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_image.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_image.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_image.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_image.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_image.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_image_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_image_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_image_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_image_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_image_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_iotdevice.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_iotdevice.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_iotdevice_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_iotdevice_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_iotdevicemodule.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_iotdevicemodule.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_iothub.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_iothub.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_iothub.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_iothub_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_iothub_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_iothub_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_iothubconsumergroup.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_iothubconsumergroup.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_iothubconsumergroup.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_keyvault.py validate-modules:doc-choices-do-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_keyvault.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_keyvault.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_keyvault.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_keyvault.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_keyvault.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_keyvault_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_keyvault_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_keyvault_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_keyvault_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_keyvaultkey.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_keyvaultkey.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_keyvaultkey.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_keyvaultkey_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_keyvaultkey_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_keyvaultkey_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_keyvaultkey_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_keyvaultsecret.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_keyvaultsecret.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_keyvaultsecret.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_loadbalancer.py validate-modules:doc-choices-do-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_loadbalancer.py validate-modules:doc-default-does-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_loadbalancer.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_loadbalancer.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_loadbalancer.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_loadbalancer.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_loadbalancer.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_loadbalancer_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_loadbalancer_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_loadbalancer_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_loadbalancer_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_lock.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_lock.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_lock_info.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_lock_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_lock_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_loganalyticsworkspace.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_loganalyticsworkspace.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_loganalyticsworkspace.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_loganalyticsworkspace_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_loganalyticsworkspace_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_loganalyticsworkspace_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_loganalyticsworkspace_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_manageddisk.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_manageddisk.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_manageddisk.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_manageddisk_info.py validate-modules:doc-type-does-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_manageddisk_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_manageddisk_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_manageddisk_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_mariadbconfiguration.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_mariadbconfiguration.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_mariadbconfiguration.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_mariadbconfiguration_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_mariadbconfiguration_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_mariadbconfiguration_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_mariadbdatabase.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_mariadbdatabase.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_mariadbdatabase.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_mariadbdatabase_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_mariadbdatabase_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_mariadbfirewallrule.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_mariadbfirewallrule.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_mariadbfirewallrule.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_mariadbfirewallrule_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_mariadbfirewallrule_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_mariadbserver.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_mariadbserver.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_mariadbserver.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_mariadbserver_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_mariadbserver_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_mariadbserver_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_monitorlogprofile.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_monitorlogprofile.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_monitorlogprofile.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_mysqlconfiguration.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_mysqlconfiguration.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_mysqlconfiguration.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_mysqlconfiguration_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_mysqlconfiguration_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_mysqlconfiguration_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_mysqldatabase.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_mysqldatabase.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_mysqldatabase.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_mysqldatabase_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_mysqldatabase_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_mysqlfirewallrule.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_mysqlfirewallrule.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_mysqlfirewallrule.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_mysqlfirewallrule.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_mysqlfirewallrule_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_mysqlfirewallrule_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_mysqlserver.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_mysqlserver.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_mysqlserver.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_mysqlserver_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_mysqlserver_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_mysqlserver_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py validate-modules:doc-missing-type
-lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_networkinterface.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_networkinterface_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_networkinterface_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_networkinterface_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_networkinterface_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_networkinterface_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_postgresqlconfiguration.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_postgresqlconfiguration.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_postgresqlconfiguration.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_postgresqlconfiguration_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_postgresqlconfiguration_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_postgresqlconfiguration_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_postgresqldatabase.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_postgresqldatabase.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_postgresqldatabase.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_postgresqldatabase_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_postgresqldatabase_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_postgresqlfirewallrule.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_postgresqlfirewallrule.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_postgresqlfirewallrule.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_postgresqlfirewallrule_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_postgresqlfirewallrule_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_postgresqlserver.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_postgresqlserver.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_postgresqlserver.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_postgresqlserver_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_postgresqlserver_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_postgresqlserver_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_publicipaddress.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_publicipaddress.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_publicipaddress.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_publicipaddress.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_publicipaddress.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_publicipaddress_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_publicipaddress_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_publicipaddress_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_publicipaddress_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_rediscache.py validate-modules:doc-default-does-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_rediscache.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_rediscache.py validate-modules:doc-type-does-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_rediscache.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_rediscache.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_rediscache.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_rediscache_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_rediscache_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_rediscache_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_rediscache_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_rediscachefirewallrule.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_rediscachefirewallrule.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_rediscachefirewallrule.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_resource.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_resource.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_resource.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_resource.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_resource_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_resource_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_resource_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_resource_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_resourcegroup.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_resourcegroup.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_resourcegroup.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_resourcegroup_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_resourcegroup_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_resourcegroup_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_resourcegroup_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_roleassignment.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_roleassignment.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_roleassignment.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_roleassignment_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_roleassignment_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_roleassignment_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_roledefinition.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_roledefinition.py validate-modules:invalid-ansiblemodule-schema
-lib/ansible/modules/cloud/azure/azure_rm_roledefinition.py validate-modules:invalid-argument-spec
-lib/ansible/modules/cloud/azure/azure_rm_roledefinition.py validate-modules:missing-suboption-docs
-lib/ansible/modules/cloud/azure/azure_rm_roledefinition.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_roledefinition.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_roledefinition.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_roledefinition.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_roledefinition_info.py validate-modules:invalid-ansiblemodule-schema
-lib/ansible/modules/cloud/azure/azure_rm_roledefinition_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_roledefinition_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_roledefinition_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_roledefinition_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_route.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_route.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_route.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_routetable.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_routetable.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_routetable.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_routetable_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_routetable_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_routetable_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_routetable_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py validate-modules:doc-choices-do-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py validate-modules:doc-default-does-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py validate-modules:missing-suboption-docs
-lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py validate-modules:mutually_exclusive-unknown
-lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py validate-modules:undocumented-parameter
-lib/ansible/modules/cloud/azure/azure_rm_securitygroup_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_securitygroup_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_securitygroup_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_securitygroup_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_servicebus.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_servicebus.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_servicebus.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_servicebus_info.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_servicebus_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_servicebus_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_servicebus_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_servicebus_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_servicebusqueue.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_servicebusqueue.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_servicebusqueue.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_servicebussaspolicy.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_servicebussaspolicy.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_servicebussaspolicy.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_servicebussaspolicy.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_servicebustopic.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_servicebustopic.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_servicebustopic.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_servicebustopic.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_servicebustopicsubscription.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_servicebustopicsubscription.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_servicebustopicsubscription.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_snapshot.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_snapshot.py validate-modules:invalid-ansiblemodule-schema
-lib/ansible/modules/cloud/azure/azure_rm_snapshot.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_snapshot.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_sqldatabase.py validate-modules:invalid-ansiblemodule-schema
-lib/ansible/modules/cloud/azure/azure_rm_sqldatabase.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_sqldatabase.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_sqldatabase.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_sqldatabase_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_sqldatabase_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_sqldatabase_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_sqldatabase_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_sqlfirewallrule_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_sqlserver.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_sqlserver.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_sqlserver.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_sqlserver_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_sqlserver_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_sqlserver_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_storageaccount.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_storageaccount.py validate-modules:doc-missing-type
-lib/ansible/modules/cloud/azure/azure_rm_storageaccount.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_storageaccount.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_storageaccount.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_storageaccount.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_storageaccount_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_storageaccount_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_storageaccount_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_storageaccount_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_storageaccount_info.py validate-modules:return-syntax-error
-lib/ansible/modules/cloud/azure/azure_rm_storageblob.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_storageblob.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_storageblob.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_subnet.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_subnet.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_subnet.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_subnet.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_subnet_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_subnet_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_subnet_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerendpoint.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerendpoint.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerendpoint.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerendpoint.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerendpoint_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerendpoint_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerendpoint_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile.py validate-modules:doc-choices-do-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile.py validate-modules:doc-default-does-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile.py validate-modules:undocumented-parameter
-lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_trafficmanagerprofile_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachine_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachine_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachine_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachine_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachineextension.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachineextension.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachineextension.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachineextension_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachineextension_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachineextension_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachineextension_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachineimage_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachineimage_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachineimage_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetextension.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetextension.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetextension.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetextension.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetextension_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetextension_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetextension_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetinstance.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetinstance.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetinstance.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetinstance.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetinstance_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetinstance_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetinstance_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescalesetinstance_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetwork_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkgateway.py validate-modules:doc-choices-do-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkgateway.py validate-modules:doc-default-does-not-match-spec
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkgateway.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkgateway.py validate-modules:doc-missing-type
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkgateway.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkgateway.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkgateway.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkgateway.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkpeering.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkpeering.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkpeering.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkpeering.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkpeering_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkpeering_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_virtualnetworkpeering_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_webapp.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_webapp.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_webapp.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_webapp.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_webapp.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_webapp_info.py validate-modules:parameter-list-no-elements
-lib/ansible/modules/cloud/azure/azure_rm_webapp_info.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_webapp_info.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_webapp_info.py validate-modules:required_if-unknown-key
-lib/ansible/modules/cloud/azure/azure_rm_webappslot.py validate-modules:doc-elements-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_webappslot.py validate-modules:doc-required-mismatch
-lib/ansible/modules/cloud/azure/azure_rm_webappslot.py validate-modules:parameter-type-not-in-doc
-lib/ansible/modules/cloud/azure/azure_rm_webappslot.py validate-modules:required_if-requirements-unknown
-lib/ansible/modules/cloud/azure/azure_rm_webappslot.py validate-modules:required_if-unknown-key
lib/ansible/modules/commands/command.py validate-modules:doc-missing-type
lib/ansible/modules/commands/command.py validate-modules:nonexistent-parameter-documented
lib/ansible/modules/commands/command.py validate-modules:parameter-list-no-elements
@@ -1030,10 +369,6 @@ lib/ansible/playbook/role/__init__.py pylint:blacklisted-name
lib/ansible/plugins/action/normal.py action-plugin-docs # default action plugin for modules without a dedicated action plugin
lib/ansible/plugins/action/vyos.py action-plugin-docs # base class for deprecated network platform modules using `connection: local`
lib/ansible/plugins/cache/base.py ansible-doc!skip # not a plugin, but a stub for backwards compatibility
-lib/ansible/plugins/doc_fragments/azure.py future-import-boilerplate
-lib/ansible/plugins/doc_fragments/azure.py metaclass-boilerplate
-lib/ansible/plugins/doc_fragments/azure_tags.py future-import-boilerplate
-lib/ansible/plugins/doc_fragments/azure_tags.py metaclass-boilerplate
lib/ansible/plugins/doc_fragments/backup.py future-import-boilerplate
lib/ansible/plugins/doc_fragments/backup.py metaclass-boilerplate
lib/ansible/plugins/doc_fragments/constructed.py future-import-boilerplate