summaryrefslogtreecommitdiff
path: root/lib/ansible/modules/cloud
diff options
context:
space:
mode:
Diffstat (limited to 'lib/ansible/modules/cloud')
-rw-r--r--lib/ansible/modules/cloud/amazon/GUIDELINES.md264
-rw-r--r--lib/ansible/modules/cloud/amazon/cloudformation_facts.py290
-rw-r--r--lib/ansible/modules/cloud/amazon/cloudtrail.py245
-rw-r--r--lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py415
-rw-r--r--lib/ansible/modules/cloud/amazon/dynamodb_table.py422
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_ami_copy.py259
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_asg_facts.py359
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_customer_gateway.py271
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_elb_facts.py253
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_eni.py576
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_eni_facts.py185
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_group_facts.py167
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_lc_facts.py229
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_lc_find.py229
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_remote_facts.py192
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py233
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vol_facts.py145
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options.py389
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options_facts.py171
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_igw.py162
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py548
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_nacl_facts.py205
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway.py1089
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_net_facts.py131
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_peer.py367
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_route_table.py637
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_route_table_facts.py131
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_subnet.py276
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_facts.py147
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py602
-rw-r--r--lib/ansible/modules/cloud/amazon/ec2_win_password.py180
-rw-r--r--lib/ansible/modules/cloud/amazon/ecs_cluster.py243
-rw-r--r--lib/ansible/modules/cloud/amazon/ecs_service.py433
-rw-r--r--lib/ansible/modules/cloud/amazon/ecs_service_facts.py240
-rw-r--r--lib/ansible/modules/cloud/amazon/ecs_task.py329
-rw-r--r--lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py344
-rw-r--r--lib/ansible/modules/cloud/amazon/efs.py630
-rw-r--r--lib/ansible/modules/cloud/amazon/efs_facts.py379
-rw-r--r--lib/ansible/modules/cloud/amazon/execute_lambda.py287
-rw-r--r--lib/ansible/modules/cloud/amazon/iam_mfa_device_facts.py122
-rw-r--r--lib/ansible/modules/cloud/amazon/iam_server_certificate_facts.py176
-rw-r--r--lib/ansible/modules/cloud/amazon/kinesis_stream.py1102
-rw-r--r--lib/ansible/modules/cloud/amazon/lambda.py473
-rw-r--r--lib/ansible/modules/cloud/amazon/lambda_alias.py389
-rw-r--r--lib/ansible/modules/cloud/amazon/lambda_event.py427
-rw-r--r--lib/ansible/modules/cloud/amazon/lambda_facts.py413
-rw-r--r--lib/ansible/modules/cloud/amazon/redshift.py479
-rw-r--r--lib/ansible/modules/cloud/amazon/redshift_subnet_group.py186
-rw-r--r--lib/ansible/modules/cloud/amazon/route53_facts.py440
-rw-r--r--lib/ansible/modules/cloud/amazon/route53_health_check.py364
-rw-r--r--lib/ansible/modules/cloud/amazon/route53_zone.py236
-rw-r--r--lib/ansible/modules/cloud/amazon/s3_bucket.py437
-rw-r--r--lib/ansible/modules/cloud/amazon/s3_lifecycle.py439
-rw-r--r--lib/ansible/modules/cloud/amazon/s3_logging.py184
-rw-r--r--lib/ansible/modules/cloud/amazon/s3_website.py297
-rw-r--r--lib/ansible/modules/cloud/amazon/sns_topic.py410
-rw-r--r--lib/ansible/modules/cloud/amazon/sqs_queue.py321
-rw-r--r--lib/ansible/modules/cloud/amazon/sts_assume_role.py157
-rw-r--r--lib/ansible/modules/cloud/amazon/sts_session_token.py164
-rw-r--r--lib/ansible/modules/cloud/atomic/__init__.py0
-rw-r--r--lib/ansible/modules/cloud/atomic/atomic_host.py110
-rw-r--r--lib/ansible/modules/cloud/atomic/atomic_image.py143
-rw-r--r--lib/ansible/modules/cloud/azure/azure_rm_deployment.py665
-rw-r--r--lib/ansible/modules/cloud/centurylink/clc_aa_policy.py360
-rw-r--r--lib/ansible/modules/cloud/centurylink/clc_alert_policy.py541
-rw-r--r--lib/ansible/modules/cloud/centurylink/clc_blueprint_package.py306
-rw-r--r--lib/ansible/modules/cloud/centurylink/clc_firewall_policy.py601
-rw-r--r--lib/ansible/modules/cloud/centurylink/clc_group.py521
-rw-r--r--lib/ansible/modules/cloud/centurylink/clc_loadbalancer.py945
-rw-r--r--lib/ansible/modules/cloud/centurylink/clc_modify_server.py981
-rw-r--r--lib/ansible/modules/cloud/centurylink/clc_publicip.py372
-rw-r--r--lib/ansible/modules/cloud/centurylink/clc_server.py1592
-rw-r--r--lib/ansible/modules/cloud/centurylink/clc_server_snapshot.py417
-rw-r--r--lib/ansible/modules/cloud/cloudstack/__init__.py0
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_account.py385
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_affinitygroup.py255
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_cluster.py421
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_configuration.py292
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_domain.py274
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_facts.py226
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_firewall.py433
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_instance.py1034
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_instance_facts.py278
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_instancegroup.py205
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_ip_address.py244
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_iso.py339
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_loadbalancer_rule.py384
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_loadbalancer_rule_member.py364
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_network.py564
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_nic.py297
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_pod.py305
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_portforward.py387
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_project.py311
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_region.py208
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_resourcelimit.py220
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_router.py378
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_securitygroup.py223
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_securitygroup_rule.py425
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_snapshot_policy.py387
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_sshkeypair.py255
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_staticnat.py282
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_template.py672
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_user.py455
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_vmsnapshot.py304
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_volume.py496
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_vpc.py391
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_zone.py406
-rw-r--r--lib/ansible/modules/cloud/cloudstack/cs_zone_facts.py205
-rw-r--r--lib/ansible/modules/cloud/google/gcdns_record.py794
-rw-r--r--lib/ansible/modules/cloud/google/gcdns_zone.py385
-rw-r--r--lib/ansible/modules/cloud/google/gce_img.py233
-rw-r--r--lib/ansible/modules/cloud/google/gce_tag.py232
-rw-r--r--lib/ansible/modules/cloud/lxc/__init__.py0
-rw-r--r--lib/ansible/modules/cloud/lxc/lxc_container.py1765
-rw-r--r--lib/ansible/modules/cloud/lxd/__init__.py0
-rw-r--r--lib/ansible/modules/cloud/lxd/lxd_container.py615
-rw-r--r--lib/ansible/modules/cloud/lxd/lxd_profile.py378
-rw-r--r--lib/ansible/modules/cloud/misc/__init__.py0
-rw-r--r--lib/ansible/modules/cloud/misc/ovirt.py527
-rw-r--r--lib/ansible/modules/cloud/misc/proxmox.py591
-rw-r--r--lib/ansible/modules/cloud/misc/proxmox_kvm.py1058
-rw-r--r--lib/ansible/modules/cloud/misc/proxmox_template.py261
-rw-r--r--lib/ansible/modules/cloud/misc/rhevm.py1534
-rw-r--r--lib/ansible/modules/cloud/misc/virt.py538
-rw-r--r--lib/ansible/modules/cloud/misc/virt_net.py622
-rw-r--r--lib/ansible/modules/cloud/misc/virt_pool.py721
-rw-r--r--lib/ansible/modules/cloud/openstack/os_flavor_facts.py249
-rw-r--r--lib/ansible/modules/cloud/openstack/os_group.py171
-rw-r--r--lib/ansible/modules/cloud/openstack/os_ironic_inspect.py173
-rw-r--r--lib/ansible/modules/cloud/openstack/os_keystone_domain.py195
-rw-r--r--lib/ansible/modules/cloud/openstack/os_keystone_domain_facts.py144
-rw-r--r--lib/ansible/modules/cloud/openstack/os_keystone_role.py140
-rw-r--r--lib/ansible/modules/cloud/openstack/os_keystone_service.py214
-rw-r--r--lib/ansible/modules/cloud/openstack/os_port_facts.py229
-rw-r--r--lib/ansible/modules/cloud/openstack/os_project.py232
-rw-r--r--lib/ansible/modules/cloud/openstack/os_project_facts.py171
-rw-r--r--lib/ansible/modules/cloud/openstack/os_recordset.py246
-rw-r--r--lib/ansible/modules/cloud/openstack/os_server_group.py186
-rw-r--r--lib/ansible/modules/cloud/openstack/os_stack.py267
-rw-r--r--lib/ansible/modules/cloud/openstack/os_user_facts.py180
-rw-r--r--lib/ansible/modules/cloud/openstack/os_user_role.py216
-rw-r--r--lib/ansible/modules/cloud/openstack/os_zone.py241
-rw-r--r--lib/ansible/modules/cloud/ovh/__init__.py0
-rw-r--r--lib/ansible/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py316
-rw-r--r--lib/ansible/modules/cloud/ovirt/__init__.py0
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_affinity_labels.py207
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_affinity_labels_facts.py158
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_auth.py234
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_clusters.py564
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_clusters_facts.py103
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_datacenters.py221
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_datacenters_facts.py102
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_disks.py322
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_external_providers.py248
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_external_providers_facts.py151
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_groups.py182
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_groups_facts.py102
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_host_networks.py368
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_host_pm.py236
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_hosts.py326
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py99
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_mac_pools.py180
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_networks.py268
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_networks_facts.py104
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_nics.py247
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_nics_facts.py122
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_permissions.py291
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_permissions_facts.py140
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_quotas.py298
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_quotas_facts.py121
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_storage_domains.py444
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_storage_domains_facts.py104
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_templates.py314
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_templates_facts.py104
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_users.py169
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_users_facts.py102
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_vmpools.py220
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_vmpools_facts.py101
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_vms.py887
-rw-r--r--lib/ansible/modules/cloud/ovirt/ovirt_vms_facts.py104
-rw-r--r--lib/ansible/modules/cloud/profitbricks/__init__.py0
-rw-r--r--lib/ansible/modules/cloud/profitbricks/profitbricks.py674
-rw-r--r--lib/ansible/modules/cloud/profitbricks/profitbricks_datacenter.py263
-rw-r--r--lib/ansible/modules/cloud/profitbricks/profitbricks_nic.py295
-rw-r--r--lib/ansible/modules/cloud/profitbricks/profitbricks_volume.py434
-rw-r--r--lib/ansible/modules/cloud/profitbricks/profitbricks_volume_attachments.py267
-rw-r--r--lib/ansible/modules/cloud/rackspace/rax_clb_ssl.py274
-rw-r--r--lib/ansible/modules/cloud/rackspace/rax_mon_alarm.py232
-rw-r--r--lib/ansible/modules/cloud/rackspace/rax_mon_check.py318
-rw-r--r--lib/ansible/modules/cloud/rackspace/rax_mon_entity.py197
-rw-r--r--lib/ansible/modules/cloud/rackspace/rax_mon_notification.py181
-rw-r--r--lib/ansible/modules/cloud/rackspace/rax_mon_notification_plan.py186
-rw-r--r--lib/ansible/modules/cloud/serverless.py191
-rw-r--r--lib/ansible/modules/cloud/smartos/__init__.py0
-rw-r--r--lib/ansible/modules/cloud/smartos/smartos_image_facts.py123
-rw-r--r--lib/ansible/modules/cloud/softlayer/__init__.py0
-rw-r--r--lib/ansible/modules/cloud/softlayer/sl_vm.py364
-rw-r--r--lib/ansible/modules/cloud/vmware/vca_fw.py249
-rw-r--r--lib/ansible/modules/cloud/vmware/vca_nat.py219
-rw-r--r--lib/ansible/modules/cloud/vmware/vca_vapp.py286
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_cluster.py255
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_datacenter.py164
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_dns_config.py134
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_dvs_host.py253
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_dvs_portgroup.py202
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_dvswitch.py213
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_guest.py1349
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_host.py229
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_local_user_manager.py195
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_maintenancemode.py216
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_migrate_vmk.py200
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_portgroup.py167
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_target_canonical_facts.py99
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_vm_facts.py105
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_vm_shell.py190
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_vm_vss_dvs_migrate.py162
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_vmkernel.py212
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_vmkernel_ip_config.py127
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_vmotion.py154
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_vsan_cluster.py134
-rw-r--r--lib/ansible/modules/cloud/vmware/vmware_vswitch.py203
-rw-r--r--lib/ansible/modules/cloud/vmware/vsphere_copy.py195
-rw-r--r--lib/ansible/modules/cloud/webfaction/__init__.py0
-rw-r--r--lib/ansible/modules/cloud/webfaction/webfaction_app.py204
-rw-r--r--lib/ansible/modules/cloud/webfaction/webfaction_db.py205
-rw-r--r--lib/ansible/modules/cloud/webfaction/webfaction_domain.py176
-rw-r--r--lib/ansible/modules/cloud/webfaction/webfaction_mailbox.py144
-rw-r--r--lib/ansible/modules/cloud/webfaction/webfaction_site.py215
-rw-r--r--lib/ansible/modules/cloud/xenserver_facts.py209
229 files changed, 72560 insertions, 0 deletions
diff --git a/lib/ansible/modules/cloud/amazon/GUIDELINES.md b/lib/ansible/modules/cloud/amazon/GUIDELINES.md
new file mode 100644
index 0000000000..b8ca836b79
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/GUIDELINES.md
@@ -0,0 +1,264 @@
+# Guidelines for AWS modules
+
+## Getting Started
+
+Since Ansible 2.0, it is a requirement that all new AWS modules are written to use boto3.
+
+Prior to 2.0, modules may have been written in boto or boto3. Modules written using boto can continue to be extended using boto.
+
+Backward compatibility of older modules must be maintained.
+
+## Bug fixing
+
+If you are writing a bugfix for a module that uses boto, you should continue to use boto to maintain backward compatibility.
+
+If you are adding new functionality to an existing module that uses boto but the new functionality requires boto3, you
+must maintain backward compatibility of the module and ensure the module still works without boto3.
+
+## Naming your module
+
+Base the name of the module on the part of AWS that
+you actually use. (A good rule of thumb is to take
+whatever module you use with boto as a starting point).
+
+Don't further abbreviate names - if something is a well
+known abbreviation due to it being a major component of
+AWS, that's fine, but don't create new ones independently
+(e.g. VPC, ELB, etc. are fine)
+
+## Adding new features
+
+Try to keep backward compatibility with relatively recent
+versions of boto. That means that if you want to implement some
+functionality that uses a new feature of boto, it should only
+fail if that feature actually needs to be run, with a message
+saying which version of boto is needed.
+
+Use feature testing (e.g. `hasattr(boto.module, 'shiny_new_method')`)
+to check whether boto supports a feature rather than version checking.
+
+e.g. from the `ec2` module:
+```python
+if boto_supports_profile_name_arg(ec2):
+ params['instance_profile_name'] = instance_profile_name
+else:
+ if instance_profile_name is not None:
+ module.fail_json(msg="instance_profile_name parameter requires boto version 2.5.0 or higher")
+```
+
+## Using boto and boto3
+
+### Importing
+
+Wrap import statements in a try block and fail the module later if the import fails
+
+#### boto
+
+```python
+try:
+ import boto.ec2
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+def main():
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto required for this module')
+```
+
+#### boto3
+
+```python
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+def main():
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+```
+
+#### boto and boto3 combined
+
+If you want to add boto3 functionality to a module written using boto, you must maintain backward compatibility.
+Ensure that you clearly document if a new parameter requires boto3. Import boto3 at the top of the
+module as normal and then use the HAS_BOTO3 bool when necessary, before the new feature.
+
+```python
+try:
+ import boto
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+if my_new_feature_parameter_is_set:
+ if HAS_BOTO3:
+ # do feature
+ else:
+ module.fail_json(msg="boto3 is required for this feature")
+```
+
+### Connecting to AWS
+
+To connect to AWS, you should use `get_aws_connection_info` and then
+`connect_to_aws`.
+
+The reason for using `get_aws_connection_info` and `connect_to_aws` rather than doing it
+yourself is that they handle some of the more esoteric connection
+options such as security tokens and boto profiles.
+
+Some boto services require region to be specified. You should check for the region parameter if required.
+
+#### boto
+
+An example of connecting to ec2:
+
+```python
+region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+if region:
+ try:
+ connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
+    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
+ module.fail_json(msg=str(e))
+else:
+ module.fail_json(msg="region must be specified")
+```
+
+#### boto3
+
+An example of connecting to ec2 is shown below. Note that there is no 'NoAuthHandlerFound' exception handling like in boto.
+Instead, an AuthFailure exception will be thrown when you use 'connection'. See exception handling.
+
+```python
+region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
+if region:
+ connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_params)
+else:
+ module.fail_json(msg="region must be specified")
+```
+
+### Exception Handling
+
+You should wrap any boto call in a try block. If an exception is thrown, it is up to you to decide how to handle it
+but usually calling fail_json with the error message will suffice.
+
+#### boto
+
+```python
+# Import BotoServerError
+try:
+ import boto.ec2
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+# Connect to AWS
+...
+
+# Make a call to AWS
+try:
+ result = connection.aws_call()
+except BotoServerError as e:
+ module.fail_json(msg=e.message)
+```
+
+#### boto3
+
+For more information on botocore exception handling see the [botocore error handling documentation](http://botocore.readthedocs.org/en/latest/client_upgrades.html#error-handling)
+
+Boto3 provides lots of useful info when an exception is thrown so pass this to the user along with the message.
+
+```python
+# Import ClientError from botocore
+try:
+ from botocore.exceptions import ClientError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+# Connect to AWS
+...
+
+# Make a call to AWS
+try:
+ result = connection.aws_call()
+except ClientError as e:
+ module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
+```
+
+If you need to perform an action based on the error boto3 returned, use the error code.
+
+```python
+# Make a call to AWS
+try:
+ result = connection.aws_call()
+except ClientError as e:
+ if e.response['Error']['Code'] == 'NoSuchEntity':
+ return None
+ else:
+ module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
+```
+
+### Returning Values
+
+When you make a call using boto3, you will probably get back some useful information that you should return in the module.
+
+As well as information related to the call itself, you will also have some response metadata. It is OK to return this to
+the user as well as they may find it useful.
+
+Boto3 returns all values CamelCased. Ansible follows Python standards for variable names and uses snake_case. There is a
+helper function in module_utils/ec2.py called `camel_dict_to_snake_dict` that allows you to easily convert the boto3
+response to snake_case.
+
+You should use this helper function and avoid changing the names of values returned by Boto3. E.g. if boto3 returns a
+value called 'SecretAccessKey' do not change it to 'AccessKey'.
+
+```python
+# Make a call to AWS
+result = connection.aws_call()
+
+# Return the result to the user
+module.exit_json(changed=True, **camel_dict_to_snake_dict(result))
+```
+
+### Helper functions
+
+Along with the connection functions in Ansible ec2.py module_utils, there are some other useful functions detailed below.
+
+#### camel_dict_to_snake_dict
+
+boto3 returns results in a dict. The keys of the dict are in CamelCase format. In keeping
+with Ansible format, this function will convert the keys to snake_case.
+
+#### ansible_dict_to_boto3_filter_list
+
+Converts an Ansible list of filters to a boto3-friendly list of dicts. This is useful for
+any boto3 _facts modules.
+
+#### boto3_tag_list_to_ansible_dict
+
+Converts a boto3 tag list to an Ansible dict. Boto3 returns tags as a list of dicts containing keys called
+'Key' and 'Value'. This function converts this list in to a single dict where the dict key is the tag
+key and the dict value is the tag value.
+
+#### ansible_dict_to_boto3_tag_list
+
+Opposite of above. Converts an Ansible dict to a boto3 tag list of dicts.
+
+#### get_ec2_security_group_ids_from_names
+
+Pass this function a list of security group names or combination of security group names and IDs and this function will
+return a list of IDs. You should also pass the VPC ID if known because security group names are not necessarily unique
+across VPCs. \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/cloudformation_facts.py b/lib/ansible/modules/cloud/amazon/cloudformation_facts.py
new file mode 100644
index 0000000000..ae40ed0242
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/cloudformation_facts.py
@@ -0,0 +1,290 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cloudformation_facts
+short_description: Obtain facts about an AWS CloudFormation stack
+description:
+ - Gets information about an AWS CloudFormation stack
+requirements:
+ - boto3 >= 1.0.0
+ - python >= 2.6
+version_added: "2.2"
+author: Justin Menga (@jmenga)
+options:
+ stack_name:
+ description:
+ - The name or id of the CloudFormation stack
+ required: true
+ all_facts:
+ description:
+ - Get all stack information for the stack
+ required: false
+ default: false
+ stack_events:
+ description:
+ - Get stack events for the stack
+ required: false
+ default: false
+ stack_template:
+ description:
+ - Get stack template body for the stack
+ required: false
+ default: false
+ stack_resources:
+ description:
+ - Get stack resources for the stack
+ required: false
+ default: false
+ stack_policy:
+ description:
+ - Get stack policy for the stack
+ required: false
+ default: false
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Get summary information about a stack
+- cloudformation_facts:
+ stack_name: my-cloudformation-stack
+
+# Facts are published in ansible_facts['cloudformation'][<stack_name>]
+- debug:
+    msg: "{{ ansible_facts['cloudformation']['my-cloudformation-stack'] }}"
+
+# Get all stack information about a stack
+- cloudformation_facts:
+ stack_name: my-cloudformation-stack
+ all_facts: true
+
+# Get stack resource and stack policy information about a stack
+- cloudformation_facts:
+ stack_name: my-cloudformation-stack
+ stack_resources: true
+ stack_policy: true
+
+# Example dictionary outputs for stack_outputs, stack_parameters and stack_resources:
+"stack_outputs": {
+ "ApplicationDatabaseName": "dazvlpr01xj55a.ap-southeast-2.rds.amazonaws.com",
+ ...
+},
+"stack_parameters": {
+ "DatabaseEngine": "mysql",
+ "DatabasePassword": "****",
+ ...
+},
+"stack_resources": {
+ "AutoscalingGroup": "dev-someapp-AutoscalingGroup-1SKEXXBCAN0S7",
+ "AutoscalingSecurityGroup": "sg-abcd1234",
+ "ApplicationDatabase": "dazvlpr01xj55a",
+ "EcsTaskDefinition": "arn:aws:ecs:ap-southeast-2:123456789:task-definition/dev-someapp-EcsTaskDefinition-1F2VM9QB0I7K9:1"
+ ...
+}
+'''
+
+RETURN = '''
+stack_description:
+ description: Summary facts about the stack
+ returned: always
+ type: dict
+stack_outputs:
+ description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each output 'OutputValue' parameter
+ returned: always
+ type: dict
+stack_parameters:
+ description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of each parameter 'ParameterValue' parameter
+ returned: always
+ type: dict
+stack_events:
+ description: All stack events for the stack
+ returned: only if all_facts or stack_events is true
+ type: list of events
+stack_policy:
+ description: Describes the stack policy for the stack
+ returned: only if all_facts or stack_policy is true
+ type: dict
+stack_template:
+ description: Describes the stack template for the stack
+ returned: only if all_facts or stack_template is true
+ type: dict
+stack_resource_list:
+ description: Describes stack resources for the stack
+    returned: only if all_facts or stack_resources is true
+ type: list of resources
+stack_resources:
+ description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each resource 'PhysicalResourceId' parameter
+    returned: only if all_facts or stack_resources is true
+ type: dict
+'''
+
+try:
+ import boto3
+ import botocore
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.ec2 import get_aws_connection_info, ec2_argument_spec
+from ansible.module_utils.basic import AnsibleModule
+from functools import partial
+import json
+import traceback
+
+class CloudFormationServiceManager:
+ """Handles CloudFormation Services"""
+
+ def __init__(self, module):
+ self.module = module
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ self.client = boto3_conn(module, conn_type='client',
+ resource='cloudformation', region=region,
+ endpoint=ec2_url, **aws_connect_kwargs)
+ except botocore.exceptions.NoRegionError:
+ self.module.fail_json(msg="Region must be specified as a parameter, in AWS_DEFAULT_REGION environment variable or in boto configuration file")
+ except Exception as e:
+ self.module.fail_json(msg="Can't establish connection - " + str(e), exception=traceback.format_exc(e))
+
+ def describe_stack(self, stack_name):
+ try:
+ func = partial(self.client.describe_stacks,StackName=stack_name)
+ response = self.paginated_response(func, 'Stacks')
+ if response:
+ return response[0]
+ self.module.fail_json(msg="Error describing stack - an empty response was returned")
+ except Exception as e:
+ self.module.fail_json(msg="Error describing stack - " + str(e), exception=traceback.format_exc(e))
+
+ def list_stack_resources(self, stack_name):
+ try:
+ func = partial(self.client.list_stack_resources,StackName=stack_name)
+ return self.paginated_response(func, 'StackResourceSummaries')
+ except Exception as e:
+ self.module.fail_json(msg="Error listing stack resources - " + str(e), exception=traceback.format_exc(e))
+
+ def describe_stack_events(self, stack_name):
+ try:
+ func = partial(self.client.describe_stack_events,StackName=stack_name)
+ return self.paginated_response(func, 'StackEvents')
+ except Exception as e:
+ self.module.fail_json(msg="Error describing stack events - " + str(e), exception=traceback.format_exc(e))
+
+ def get_stack_policy(self, stack_name):
+ try:
+ response = self.client.get_stack_policy(StackName=stack_name)
+ stack_policy = response.get('StackPolicyBody')
+ if stack_policy:
+ return json.loads(stack_policy)
+ return dict()
+ except Exception as e:
+ self.module.fail_json(msg="Error getting stack policy - " + str(e), exception=traceback.format_exc(e))
+
+ def get_template(self, stack_name):
+ try:
+ response = self.client.get_template(StackName=stack_name)
+ return response.get('TemplateBody')
+ except Exception as e:
+ self.module.fail_json(msg="Error getting stack template - " + str(e), exception=traceback.format_exc(e))
+
+ def paginated_response(self, func, result_key, next_token=None):
+ '''
+ Returns expanded response for paginated operations.
+ The 'result_key' is used to define the concatenated results that are combined from each paginated response.
+ '''
+ args=dict()
+ if next_token:
+ args['NextToken'] = next_token
+ response = func(**args)
+ result = response.get(result_key)
+ next_token = response.get('NextToken')
+ if not next_token:
+ return result
+ return result + self.paginated_response(func, result_key, next_token)
+
+def to_dict(items, key, value):
+ ''' Transforms a list of items to a Key/Value dictionary '''
+ if items:
+ return dict(zip([i[key] for i in items], [i[value] for i in items]))
+ else:
+ return dict()
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ stack_name=dict(required=True, type='str' ),
+ all_facts=dict(required=False, default=False, type='bool'),
+ stack_policy=dict(required=False, default=False, type='bool'),
+ stack_events=dict(required=False, default=False, type='bool'),
+ stack_resources=dict(required=False, default=False, type='bool'),
+ stack_template=dict(required=False, default=False, type='bool'),
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 is required.')
+
+ # Describe the stack
+ service_mgr = CloudFormationServiceManager(module)
+ stack_name = module.params.get('stack_name')
+ result = {
+ 'ansible_facts': { 'cloudformation': { stack_name:{} } }
+ }
+ facts = result['ansible_facts']['cloudformation'][stack_name]
+ facts['stack_description'] = service_mgr.describe_stack(stack_name)
+
+ # Create stack output and stack parameter dictionaries
+ if facts['stack_description']:
+ facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue')
+ facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'), 'ParameterKey', 'ParameterValue')
+
+ # normalize stack description API output
+ facts['stack_description'] = camel_dict_to_snake_dict(facts['stack_description'])
+ # camel2snake doesn't handle NotificationARNs properly, so let's fix that
+ facts['stack_description']['notification_arns'] = facts['stack_description'].pop('notification_ar_ns', [])
+
+ # Create optional stack outputs
+ all_facts = module.params.get('all_facts')
+ if all_facts or module.params.get('stack_resources'):
+ facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name)
+ facts['stack_resources'] = to_dict(facts.get('stack_resource_list'), 'LogicalResourceId', 'PhysicalResourceId')
+ if all_facts or module.params.get('stack_template'):
+ facts['stack_template'] = service_mgr.get_template(stack_name)
+ if all_facts or module.params.get('stack_policy'):
+ facts['stack_policy'] = service_mgr.get_stack_policy(stack_name)
+ if all_facts or module.params.get('stack_events'):
+ facts['stack_events'] = service_mgr.describe_stack_events(stack_name)
+
+ result['changed'] = False
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/cloudtrail.py b/lib/ansible/modules/cloud/amazon/cloudtrail.py
new file mode 100644
index 0000000000..ab4652fccd
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/cloudtrail.py
@@ -0,0 +1,245 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: cloudtrail
+short_description: manage CloudTrail creation and deletion
+description:
+ - Creates or deletes CloudTrail configuration. Ensures logging is also enabled.
+version_added: "2.0"
+author:
+ - "Ansible Core Team"
+ - "Ted Timmons"
+requirements:
+ - "boto >= 2.21"
+options:
+ state:
+ description:
+ - add or remove CloudTrail configuration.
+ required: true
+ choices: ['enabled', 'disabled']
+ name:
+ description:
+ - name for given CloudTrail configuration.
+ - This is a primary key and is used to identify the configuration.
  s3_bucket_name:
+ description:
+ - bucket to place CloudTrail in.
+ - this bucket should exist and have the proper policy. See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html)
+ - required when state=enabled.
+ required: false
+ s3_key_prefix:
+ description:
+ - prefix to keys in bucket. A trailing slash is not necessary and will be removed.
+ required: false
+ include_global_events:
+ description:
+ - record API calls from global services such as IAM and STS?
+ required: false
+ default: false
+ choices: ["true", "false"]
+
+ aws_secret_key:
+ description:
+ - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
+ required: false
+ default: null
+ aliases: [ 'ec2_secret_key', 'secret_key' ]
+ version_added: "1.5"
+ aws_access_key:
+ description:
+ - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
+ required: false
+ default: null
+ aliases: [ 'ec2_access_key', 'access_key' ]
+ version_added: "1.5"
+ region:
+ description:
+ - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
+ required: false
+ aliases: ['aws_region', 'ec2_region']
+ version_added: "1.5"
+
+extends_documentation_fragment: aws
+"""
+
+EXAMPLES = """
+ - name: enable cloudtrail
+ local_action: cloudtrail
+ state: enabled
+ name: main
+ s3_bucket_name: ourbucket
+ s3_key_prefix: cloudtrail
+ region: us-east-1
+
+ - name: enable cloudtrail with different configuration
+ local_action: cloudtrail
+ state: enabled
+ name: main
+ s3_bucket_name: ourbucket2
+ s3_key_prefix: ''
+ region: us-east-1
+
+ - name: remove cloudtrail
+ local_action: cloudtrail
+ state: disabled
+ name: main
+ region: us-east-1
+"""
+
+HAS_BOTO = False
+try:
+ import boto
+ import boto.cloudtrail
+ from boto.regioninfo import RegionInfo
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_ec2_creds
+
+
class CloudTrailManager:
    """Thin wrapper around a boto CloudTrail connection.

    Keeps the AnsibleModule handle so connection failures can be reported
    through fail_json, and exposes small helpers for the operations the
    module performs (describe, create, update, delete, start logging).
    """

    def __init__(self, module, region=None, **aws_connect_params):
        self.module = module
        self.region = region
        self.aws_connect_params = aws_connect_params
        self.changed = False

        try:
            self.conn = connect_to_aws(boto.cloudtrail, self.region, **self.aws_connect_params)
        except boto.exception.NoAuthHandlerFound as e:
            self.module.fail_json(msg=str(e))

    def view_status(self, name):
        """Return the trail status structure (includes IsLogging) for *name*."""
        return self.conn.get_trail_status(name)

    def view(self, name):
        """Return the trail description for *name*, or None when not found."""
        trails = self.conn.describe_trails(trail_name_list=[name]).get('trailList', [])
        if len(trails) == 1:
            return trails[0]
        return None

    def exists(self, name=None):
        """Return True when a trail called *name* exists."""
        return bool(self.view(name))

    def enable_logging(self, name):
        """Turn on logging for a cloudtrail that already exists. Throws Exception on error."""
        self.conn.start_logging(name)

    def enable(self, **create_args):
        """Create a new trail from the given keyword arguments."""
        return self.conn.create_trail(**create_args)

    def update(self, **create_args):
        """Update an existing trail from the given keyword arguments."""
        return self.conn.update_trail(**create_args)

    def delete(self, name):
        """Delete a given cloudtrail configuration. Throws Exception on error."""
        self.conn.delete_trail(name)
+
+
+
def main():
    """Entry point: create/update ('enabled') or remove ('disabled') a CloudTrail trail."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state={'required': True, 'choices': ['enabled', 'disabled']},
        name={'required': True, 'type': 'str'},
        s3_bucket_name={'required': False, 'type': 'str'},
        s3_key_prefix={'default': '', 'required': False, 'type': 'str'},
        include_global_events={'default': True, 'required': False, 'type': 'bool'},
    ))
    # required_together must be a list of lists of parameter names.  The
    # previous flat list made AnsibleModule iterate each *string*, checking
    # individual characters as if they were parameter names.
    required_together = [['state', 's3_bucket_name']]

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required.')

    ec2_url, access_key, secret_key, region = get_ec2_creds(module)
    aws_connect_params = dict(aws_access_key_id=access_key,
                              aws_secret_access_key=secret_key)

    if not region:
        module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")

    ct_name = module.params['name']
    s3_bucket_name = module.params['s3_bucket_name']
    # remove trailing slash from the key prefix, really messes up the key structure.
    s3_key_prefix = module.params['s3_key_prefix'].rstrip('/')

    include_global_events = module.params['include_global_events']

    cf_man = CloudTrailManager(module, region=region, **aws_connect_params)

    results = {'changed': False}
    if module.params['state'] == 'enabled':
        results['exists'] = cf_man.exists(name=ct_name)
        if results['exists']:
            results['view'] = cf_man.view(ct_name)
            # only update if the values have changed.
            if results['view']['S3BucketName'] != s3_bucket_name or \
                    results['view'].get('S3KeyPrefix', '') != s3_key_prefix or \
                    results['view']['IncludeGlobalServiceEvents'] != include_global_events:
                if not module.check_mode:
                    results['update'] = cf_man.update(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix,
                                                      include_global_service_events=include_global_events)
                results['changed'] = True
        else:
            if not module.check_mode:
                # doesn't exist. create it.
                results['enable'] = cf_man.enable(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix,
                                                  include_global_service_events=include_global_events)
            results['changed'] = True

        # given cloudtrail should exist now. Enable the logging.
        results['view_status'] = cf_man.view_status(ct_name)
        results['was_logging_enabled'] = results['view_status'].get('IsLogging', False)
        if not results['was_logging_enabled']:
            if not module.check_mode:
                cf_man.enable_logging(ct_name)
                results['logging_enabled'] = True
            results['changed'] = True

    # delete the cloudtrail
    elif module.params['state'] == 'disabled':
        # check to see if it exists before deleting.
        results['exists'] = cf_man.exists(name=ct_name)
        if results['exists']:
            # it exists, so we should delete it and mark changed.
            if not module.check_mode:
                cf_man.delete(ct_name)
            results['changed'] = True

    module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py b/lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py
new file mode 100644
index 0000000000..643343d82f
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py
@@ -0,0 +1,415 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cloudwatchevent_rule
+short_description: Manage CloudWatch Event rules and targets
+description:
+ - This module creates and manages CloudWatch event rules and targets.
+version_added: "2.2"
+extends_documentation_fragment:
+ - aws
+author: "Jim Dalton (@jsdalton) <jim.dalton@gmail.com>"
+requirements:
+ - python >= 2.6
+ - boto3
+notes:
+ - A rule must contain at least an I(event_pattern) or I(schedule_expression). A
+ rule can have both an I(event_pattern) and a I(schedule_expression), in which
+ case the rule will trigger on matching events as well as on a schedule.
+ - When specifying targets, I(input) and I(input_path) are mutually-exclusive
+ and optional parameters.
+options:
+ name:
+ description:
+ - The name of the rule you are creating, updating or deleting. No spaces
+ or special characters allowed (i.e. must match C([\.\-_A-Za-z0-9]+))
+ required: true
+ schedule_expression:
+ description:
+ - A cron or rate expression that defines the schedule the rule will
+ trigger on. For example, C(cron(0 20 * * ? *)), C(rate(5 minutes))
+ required: false
+ event_pattern:
+ description:
+ - A string pattern (in valid JSON format) that is used to match against
+ incoming events to determine if the rule should be triggered
+ required: false
+ state:
+ description:
+ - Whether the rule is present (and enabled), disabled, or absent
+ choices: ["present", "disabled", "absent"]
+ default: present
+ required: false
+ description:
+ description:
+ - A description of the rule
+ required: false
+ role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the IAM role associated with the rule
+ required: false
+ targets:
+ description:
+ - "A dictionary array of targets to add to or update for the rule, in the
+ form C({ id: [string], arn: [string], input: [valid JSON string], input_path: [valid JSONPath string] }).
+ I(id) [required] is the unique target assignment ID. I(arn) (required)
+ is the Amazon Resource Name associated with the target. I(input)
+ (optional) is a JSON object that will override the event data when
+ passed to the target. I(input_path) (optional) is a JSONPath string
+ (e.g. C($.detail)) that specifies the part of the event data to be
+ passed to the target. If neither I(input) nor I(input_path) is
+ specified, then the entire event is passed to the target in JSON form."
+ required: false
+'''
+
+EXAMPLES = '''
+- cloudwatchevent_rule:
+ name: MyCronTask
+ schedule_expression: "cron(0 20 * * ? *)"
+ description: Run my scheduled task
+ targets:
+ - id: MyTargetId
+ arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
+
+- cloudwatchevent_rule:
+ name: MyDisabledCronTask
    schedule_expression: "rate(5 minutes)"
+ description: Run my disabled scheduled task
+ state: disabled
+ targets:
+ - id: MyOtherTargetId
+ arn: arn:aws:lambda:us-east-1:123456789012:function:MyFunction
+ input: '{"foo": "bar"}'
+
+- cloudwatchevent_rule:
+ name: MyCronTask
+ state: absent
+'''
+
+RETURN = '''
+rule:
+ description: CloudWatch Event rule data
+ returned: success
+ type: dict
+ sample: "{ 'arn': 'arn:aws:events:us-east-1:123456789012:rule/MyCronTask', 'description': 'Run my scheduled task', 'name': 'MyCronTask', 'schedule_expression': 'cron(0 20 * * ? *)', 'state': 'ENABLED' }"
+targets:
+ description: CloudWatch Event target(s) assigned to the rule
+ returned: success
+ type: list
+ sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]"
+'''
+
+
class CloudWatchEventRule(object):
    """Wraps a single CloudWatch Events rule and its boto3 API operations.

    NOTE(review): methods below reference ``botocore`` but this file never
    imports it directly; it relies on the module_utils wildcard imports at
    the bottom of the file bringing it into scope -- confirm.
    """

    def __init__(self, module, name, client, schedule_expression=None,
                 event_pattern=None, description=None, role_arn=None):
        # NOTE: ``module`` is accepted for interface symmetry but not stored.
        self.name = name
        self.client = client
        self.changed = False
        self.schedule_expression = schedule_expression
        self.event_pattern = event_pattern
        self.description = description
        self.role_arn = role_arn

    def describe(self):
        """Returns the existing details of the rule in AWS, or {} when absent."""
        try:
            rule_info = self.client.describe_rule(Name=self.name)
        except botocore.exceptions.ClientError as e:
            error_code = e.response.get('Error', {}).get('Code')
            if error_code == 'ResourceNotFoundException':
                return {}
            raise
        return self._snakify(rule_info)

    def put(self, enabled=True):
        """Creates or updates the rule in AWS"""
        request = {
            'Name': self.name,
            'State': "ENABLED" if enabled else "DISABLED",
        }
        # only send the optional fields the caller actually set
        if self.schedule_expression:
            request['ScheduleExpression'] = self.schedule_expression
        if self.event_pattern:
            request['EventPattern'] = self.event_pattern
        if self.description:
            request['Description'] = self.description
        if self.role_arn:
            request['RoleArn'] = self.role_arn
        response = self.client.put_rule(**request)
        self.changed = True
        return response

    def delete(self):
        """Deletes the rule in AWS (targets must be removed first)."""
        self.remove_all_targets()
        response = self.client.delete_rule(Name=self.name)
        self.changed = True
        return response

    def enable(self):
        """Enables the rule in AWS"""
        response = self.client.enable_rule(Name=self.name)
        self.changed = True
        return response

    def disable(self):
        """Disables the rule in AWS"""
        response = self.client.disable_rule(Name=self.name)
        self.changed = True
        return response

    def list_targets(self):
        """Lists the existing targets for the rule in AWS ([] when the rule is absent)."""
        try:
            targets = self.client.list_targets_by_rule(Rule=self.name)
        except botocore.exceptions.ClientError as e:
            error_code = e.response.get('Error', {}).get('Code')
            if error_code == 'ResourceNotFoundException':
                return []
            raise
        return self._snakify(targets)['targets']

    def put_targets(self, targets):
        """Creates or updates the provided targets on the rule in AWS"""
        if not targets:
            return
        request = {
            'Rule': self.name,
            'Targets': self._targets_request(targets),
        }
        response = self.client.put_targets(**request)
        self.changed = True
        return response

    def remove_targets(self, target_ids):
        """Removes the provided targets from the rule in AWS"""
        if not target_ids:
            return
        request = {
            'Rule': self.name,
            'Ids': target_ids
        }
        response = self.client.remove_targets(**request)
        self.changed = True
        return response

    def remove_all_targets(self):
        """Removes all targets on rule"""
        targets = self.list_targets()
        return self.remove_targets([t['id'] for t in targets])

    def _targets_request(self, targets):
        """Formats each target dict into the CamelCase shape put_targets expects."""
        targets_request = []
        for target in targets:
            target_request = {
                'Id': target['id'],
                'Arn': target['arn']
            }
            if 'input' in target:
                target_request['Input'] = target['input']
            if 'input_path' in target:
                target_request['InputPath'] = target['input_path']
            targets_request.append(target_request)
        return targets_request

    def _snakify(self, camel_data):
        """Converts camel case keys to snake case"""
        # parameter renamed from 'dict' -- it shadowed the builtin
        return camel_dict_to_snake_dict(camel_data)
+
+
class CloudWatchEventRuleManager(object):
    """Reconciles a desired rule + targets definition with the state in AWS."""

    RULE_FIELDS = ['name', 'event_pattern', 'schedule_expression', 'description', 'role_arn']

    def __init__(self, rule, targets):
        self.rule = rule
        self.targets = targets

    def ensure_present(self, enabled=True):
        """Create or update the rule, its targets and its enabled/disabled state."""
        if self.rule.describe():
            # existing rule: bring the definition, targets and state in line
            self._sync_rule(enabled)
            self._sync_targets()
            self._sync_state(enabled)
        else:
            # no such rule yet: create it along with its targets
            self._create(enabled)

    def ensure_disabled(self):
        """Like ensure_present, but leaves the rule disabled."""
        self.ensure_present(enabled=False)

    def ensure_absent(self):
        """Delete the rule (and its targets) when it exists; no-op otherwise."""
        if self.rule.describe():
            self.rule.delete()

    def fetch_aws_state(self):
        """Return the rule/targets state from AWS plus the 'changed' flag."""
        aws_state = {
            'rule': {},
            'targets': [],
            'changed': self.rule.changed
        }
        description = self.rule.describe()
        if not description:
            return aws_state

        # strip the API response metadata noise before reporting
        del description['response_metadata']

        aws_state['rule'] = description
        aws_state['targets'].extend(self.rule.list_targets())
        return aws_state

    def _sync_rule(self, enabled=True):
        """Push the local rule definition when it differs from AWS."""
        if not self._rule_matches_aws():
            self.rule.put(enabled)

    def _sync_targets(self):
        """Remove extraneous remote targets, then add/update the desired ones."""
        stale_ids = self._remote_target_ids_to_remove()
        if stale_ids:
            self.rule.remove_targets(stale_ids)

        pending = self._targets_to_put()
        if pending:
            self.rule.put_targets(pending)

    def _sync_state(self, enabled=True):
        """Enable or disable the remote rule to match the requested state."""
        current = self._remote_state()
        if enabled:
            if current != 'ENABLED':
                self.rule.enable()
        elif current != 'DISABLED':
            self.rule.disable()

    def _create(self, enabled=True):
        """Create the rule and attach all desired targets."""
        self.rule.put(enabled)
        self.rule.put_targets(self.targets)

    def _rule_matches_aws(self):
        """True when every tracked rule field equals its remote counterpart."""
        remote = self.rule.describe()
        return all(getattr(self.rule, field) == remote.get(field, None)
                   for field in self.RULE_FIELDS)

    def _targets_to_put(self):
        """Desired targets that are missing from (or differ in) AWS."""
        remote = self.rule.list_targets()
        return [target for target in self.targets if target not in remote]

    def _remote_target_ids_to_remove(self):
        """IDs of remote targets that are not in the desired target list."""
        wanted = set(t['id'] for t in self.targets)
        return [remote['id'] for remote in self.rule.list_targets()
                if remote['id'] not in wanted]

    def _remote_state(self):
        """The remote rule's state string, or None when the rule is absent."""
        description = self.rule.describe()
        if description:
            return description['state']
+
+
def get_cloudwatchevents_client(module):
    """Returns a boto3 client for accessing CloudWatch Events.

    Fails the module when no region can be determined or when boto3/botocore
    cannot establish a connection.
    """
    try:
        region, ec2_url, aws_conn_kwargs = get_aws_connection_info(module,
                                                                   boto3=True)
        if not region:
            # single-line message: the old backslash-continued literal embedded
            # long runs of spaces in the user-visible error text
            module.fail_json(msg="Region must be specified as a parameter, in "
                                 "EC2_REGION or AWS_REGION environment variables "
                                 "or in boto configuration file")
        return boto3_conn(module, conn_type='client',
                          resource='events',
                          region=region, endpoint=ec2_url,
                          **aws_conn_kwargs)
    except botocore.exceptions.BotoCoreError as e:
        # boto3 has no boto3.exception.NoAuthHandlerFound (that is a boto2
        # class); credential/region problems surface as BotoCoreError
        # subclasses such as NoCredentialsError and ProfileNotFound.
        module.fail_json(msg=str(e))
+
+
def main():
    """Module entry point: dispatch on the requested state and report AWS state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        schedule_expression=dict(),
        event_pattern=dict(),
        state=dict(choices=['present', 'disabled', 'absent'], default='present'),
        description=dict(),
        role_arn=dict(),
        targets=dict(type='list', default=[]),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    # collect the rule definition fields straight from the task parameters
    rule_data = dict((field, module.params.get(field))
                     for field in CloudWatchEventRuleManager.RULE_FIELDS)
    targets = module.params.get('targets')
    state = module.params.get('state')

    cwe_rule = CloudWatchEventRule(module,
                                   client=get_cloudwatchevents_client(module),
                                   **rule_data)
    manager = CloudWatchEventRuleManager(cwe_rule, targets)

    if state == 'present':
        manager.ensure_present()
    elif state == 'disabled':
        manager.ensure_disabled()
    elif state == 'absent':
        manager.ensure_absent()
    else:
        # defensive: argument_spec choices should make this unreachable
        module.fail_json(msg="Invalid state '{0}' provided".format(state))

    module.exit_json(**manager.fetch_aws_state())
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/dynamodb_table.py b/lib/ansible/modules/cloud/amazon/dynamodb_table.py
new file mode 100644
index 0000000000..75e410d4b7
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/dynamodb_table.py
@@ -0,0 +1,422 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: dynamodb_table
+short_description: Create, update or delete AWS Dynamo DB tables.
+version_added: "2.0"
+description:
+ - Create or delete AWS Dynamo DB tables.
+ - Can update the provisioned throughput on existing tables.
+ - Returns the status of the specified table.
+author: Alan Loi (@loia)
+requirements:
+ - "boto >= 2.37.0"
+options:
+ state:
+ description:
+ - Create or delete the table
+ required: false
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ description:
+ - Name of the table.
+ required: true
+ hash_key_name:
+ description:
+ - Name of the hash key.
+ - Required when C(state=present).
+ required: false
+ default: null
+ hash_key_type:
+ description:
+ - Type of the hash key.
+ required: false
+ choices: ['STRING', 'NUMBER', 'BINARY']
+ default: 'STRING'
+ range_key_name:
+ description:
+ - Name of the range key.
+ required: false
+ default: null
+ range_key_type:
+ description:
+ - Type of the range key.
+ required: false
+ choices: ['STRING', 'NUMBER', 'BINARY']
+ default: 'STRING'
+ read_capacity:
+ description:
+ - Read throughput capacity (units) to provision.
+ required: false
+ default: 1
+ write_capacity:
+ description:
+ - Write throughput capacity (units) to provision.
+ required: false
+ default: 1
+ indexes:
+ description:
+ - list of dictionaries describing indexes to add to the table. global indexes can be updated. local indexes don't support updates or have throughput.
+ - "required options: ['name', 'type', 'hash_key_name']"
+ - "valid types: ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only']"
+ - "other options: ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']"
+ required: false
+ default: []
+ version_added: "2.1"
+extends_documentation_fragment:
+ - aws
+ - ec2
+"""
+
+EXAMPLES = '''
+# Create dynamo table with hash and range primary key
+- dynamodb_table:
+ name: my-table
+ region: us-east-1
+ hash_key_name: id
+ hash_key_type: STRING
+ range_key_name: create_time
+ range_key_type: NUMBER
+ read_capacity: 2
+ write_capacity: 2
+
+# Update capacity on existing dynamo table
+- dynamodb_table:
+ name: my-table
+ region: us-east-1
+ read_capacity: 10
+ write_capacity: 10
+
+# set index on existing dynamo table
+- dynamodb_table:
+ name: my-table
+ region: us-east-1
+ indexes:
+ - name: NamedIndex
+ type: global_include
+ hash_key_name: id
+ range_key_name: create_time
+ includes:
+ - other_field
+ - other_field2
+ read_capacity: 10
+ write_capacity: 10
+
+# Delete dynamo table
+- dynamodb_table:
+ name: my-table
+ region: us-east-1
+ state: absent
+'''
+
+RETURN = '''
+table_status:
+ description: The current status of the table.
+ returned: success
+ type: string
+ sample: ACTIVE
+'''
+
+import traceback
+
+try:
+ import boto
+ import boto.dynamodb2
+ from boto.dynamodb2.table import Table
+ from boto.dynamodb2.fields import HashKey, RangeKey, AllIndex, GlobalAllIndex, GlobalIncludeIndex, GlobalKeysOnlyIndex, IncludeIndex, KeysOnlyIndex
+ from boto.dynamodb2.types import STRING, NUMBER, BINARY
+ from boto.exception import BotoServerError, NoAuthHandlerFound, JSONResponseError
+ from boto.dynamodb2.exceptions import ValidationException
+ HAS_BOTO = True
+
+ DYNAMO_TYPE_MAP = {
+ 'STRING': STRING,
+ 'NUMBER': NUMBER,
+ 'BINARY': BINARY
+ }
+
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
+
+
+DYNAMO_TYPE_DEFAULT = 'STRING'
+INDEX_REQUIRED_OPTIONS = ['name', 'type', 'hash_key_name']
+INDEX_OPTIONS = INDEX_REQUIRED_OPTIONS + ['hash_key_type', 'range_key_name', 'range_key_type', 'includes', 'read_capacity', 'write_capacity']
+INDEX_TYPE_OPTIONS = ['all', 'global_all', 'global_include', 'global_keys_only', 'include', 'keys_only']
+
+
def create_or_update_dynamo_table(connection, module):
    """Create the table when missing, otherwise sync throughput/global indexes.

    Always exits the module: exit_json on success, fail_json (with the
    traceback in 'msg') on BotoServerError.
    """
    table_name = module.params.get('name')
    hash_key_name = module.params.get('hash_key_name')
    hash_key_type = module.params.get('hash_key_type')
    range_key_name = module.params.get('range_key_name')
    range_key_type = module.params.get('range_key_type')
    read_capacity = module.params.get('read_capacity')
    write_capacity = module.params.get('write_capacity')
    all_indexes = module.params.get('indexes')

    # fail_json early on malformed index definitions
    for index in all_indexes:
        validate_index(index, module)

    schema = get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type)

    throughput = {
        'read': read_capacity,
        'write': write_capacity
    }

    indexes, global_indexes = get_indexes(all_indexes)

    # echoed back to the caller in both the success and the failure result
    result = dict(
        region=module.params.get('region'),
        table_name=table_name,
        hash_key_name=hash_key_name,
        hash_key_type=hash_key_type,
        range_key_name=range_key_name,
        range_key_type=range_key_type,
        read_capacity=read_capacity,
        write_capacity=write_capacity,
        indexes=all_indexes,
    )

    try:
        table = Table(table_name, connection=connection)


        if dynamo_table_exists(table):
            # existing table: only throughput and global indexes can change
            result['changed'] = update_dynamo_table(table, throughput=throughput, check_mode=module.check_mode, global_indexes=global_indexes)
        else:
            if not module.check_mode:
                Table.create(table_name, connection=connection, schema=schema, throughput=throughput, indexes=indexes, global_indexes=global_indexes)
            result['changed'] = True

        # in check mode the table may not actually exist, so only describe it for real runs
        if not module.check_mode:
            result['table_status'] = table.describe()['Table']['TableStatus']

    except BotoServerError:
        result['msg'] = 'Failed to create/update dynamo table due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
+
+
def delete_dynamo_table(connection, module):
    """Delete the named DynamoDB table when it exists; report via exit_json.

    On BotoServerError the traceback is attached to 'msg' and the module fails.
    """
    table_name = module.params.get('name')

    result = dict(
        region=module.params.get('region'),
        table_name=table_name,
    )

    try:
        table = Table(table_name, connection=connection)
        exists = dynamo_table_exists(table)
        # in check mode we only report what would happen
        if exists and not module.check_mode:
            table.delete()
        result['changed'] = exists

    except BotoServerError:
        result['msg'] = 'Failed to delete dynamo table due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
+
+
def dynamo_table_exists(table):
    """Return True when *table* can be described, False when AWS reports it missing."""
    try:
        table.describe()
    except JSONResponseError as e:
        # boto signals a missing table with this message prefix
        if e.message and e.message.startswith('Requested resource not found'):
            return False
        raise e
    return True
+
+
def update_dynamo_table(table, throughput=None, check_mode=False, global_indexes=None):
    """Sync throughput and global secondary indexes on an existing table.

    Returns True when anything was (or, in check mode, would be) changed.
    """
    table.describe()  # populate table details
    throughput_changed = False
    global_indexes_changed = False
    if has_throughput_changed(table, throughput):
        if not check_mode:
            throughput_changed = table.update(throughput=throughput)
        else:
            throughput_changed = True

    removed_indexes, added_indexes, index_throughput_changes = get_changed_global_indexes(table, global_indexes)
    if removed_indexes:
        if not check_mode:
            # dict.items() instead of the Python-2-only dict.iteritems()
            for name, index in removed_indexes.items():
                global_indexes_changed = table.delete_global_secondary_index(name) or global_indexes_changed
        else:
            global_indexes_changed = True

    if added_indexes:
        if not check_mode:
            for name, index in added_indexes.items():
                global_indexes_changed = table.create_global_secondary_index(global_index=index) or global_indexes_changed
        else:
            global_indexes_changed = True

    if index_throughput_changes:
        if not check_mode:
            # todo: remove try once boto has https://github.com/boto/boto/pull/3447 fixed
            try:
                global_indexes_changed = table.update_global_secondary_index(global_indexes=index_throughput_changes) or global_indexes_changed
            except ValidationException:
                pass
        else:
            global_indexes_changed = True

    return throughput_changed or global_indexes_changed
+
+
def has_throughput_changed(table, new_throughput):
    """Return True when *new_throughput* differs from the table's current settings."""
    if not new_throughput:
        return False
    current = table.throughput
    return any(new_throughput[key] != current[key] for key in ('read', 'write'))
+
+
def get_schema_param(hash_key_name, hash_key_type, range_key_name, range_key_type):
    """Build the boto schema list: a HashKey plus an optional RangeKey.

    Unknown key types fall back to the DYNAMO_TYPE_DEFAULT ('STRING') type.
    """
    default_type = DYNAMO_TYPE_MAP[DYNAMO_TYPE_DEFAULT]
    schema = [HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type, default_type))]
    if range_key_name:
        schema.append(RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type, default_type)))
    return schema
+
+
def get_changed_global_indexes(table, global_indexes):
    """Diff the desired *global_indexes* against the table's current ones.

    Returns (removed_indexes, added_indexes, index_throughput_changes), each a
    dict keyed by index name.
    """
    table.describe()

    table_index_info = dict((index.name, index.schema()) for index in table.global_indexes)
    table_index_objects = dict((index.name, index) for index in table.global_indexes)
    set_index_info = dict((index.name, index.schema()) for index in global_indexes)
    set_index_objects = dict((index.name, index) for index in global_indexes)

    # dict.items() instead of the Python-2-only dict.iteritems()
    removed_indexes = dict((name, index) for name, index in table_index_info.items() if name not in set_index_info)
    added_indexes = dict((name, set_index_objects[name]) for name, index in set_index_info.items() if name not in table_index_info)
    # todo: uncomment once boto has https://github.com/boto/boto/pull/3447 fixed
    # index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes and (index.throughput['read'] != str(table_index_objects[name].throughput['read']) or index.throughput['write'] != str(table_index_objects[name].throughput['write'])))
    # todo: remove once boto has https://github.com/boto/boto/pull/3447 fixed
    index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes)

    return removed_indexes, added_indexes, index_throughput_changes
+
+
def validate_index(index, module):
    """fail_json when *index* has unknown keys, missing required keys, or a bad type."""
    # iterate keys directly: the values were unused and dict.iteritems()
    # is Python 2 only
    for key in index:
        if key not in INDEX_OPTIONS:
            module.fail_json(msg='%s is not a valid option for an index' % key)
    for required_option in INDEX_REQUIRED_OPTIONS:
        if required_option not in index:
            module.fail_json(msg='%s is a required option for an index' % required_option)
    if index['type'] not in INDEX_TYPE_OPTIONS:
        module.fail_json(msg='%s is not a valid index type, must be one of %s' % (index['type'], INDEX_TYPE_OPTIONS))
+
def get_indexes(all_indexes):
    """Split user-supplied index definitions into (local, global) boto index objects."""
    indexes = []
    global_indexes = []
    for index in all_indexes:
        name = index['name']
        schema = get_schema_param(index.get('hash_key_name'), index.get('hash_key_type'),
                                  index.get('range_key_name'), index.get('range_key_type'))
        # per-index throughput only applies to global indexes
        throughput = {
            'read': index.get('read_capacity', 1),
            'write': index.get('write_capacity', 1)
        }

        index_type = index['type']
        if index_type == 'all':
            indexes.append(AllIndex(name, parts=schema))
        elif index_type == 'global_all':
            global_indexes.append(GlobalAllIndex(name, parts=schema, throughput=throughput))
        elif index_type == 'global_include':
            global_indexes.append(GlobalIncludeIndex(name, parts=schema, throughput=throughput, includes=index['includes']))
        elif index_type == 'global_keys_only':
            global_indexes.append(GlobalKeysOnlyIndex(name, parts=schema, throughput=throughput))
        elif index_type == 'include':
            indexes.append(IncludeIndex(name, parts=schema, includes=index['includes']))
        elif index_type == 'keys_only':
            indexes.append(KeysOnlyIndex(name, parts=schema))

    return indexes, global_indexes
+
+
+
def main():
    """Entry point: create/update or delete the table based on 'state'."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        name=dict(required=True, type='str'),
        # hash_key_name is only needed when creating a table; it is enforced
        # via required_if below so that 'state: absent' works without it,
        # matching the documented "Required when C(state=present)" contract
        hash_key_name=dict(type='str'),
        hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
        range_key_name=dict(type='str'),
        range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']),
        read_capacity=dict(default=1, type='int'),
        write_capacity=dict(default=1, type='int'),
        indexes=dict(default=[], type='list'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_if=[['state', 'present', ['hash_key_name']]],
        supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg='region must be specified')

    try:
        connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)
    except (NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    state = module.params.get('state')
    if state == 'present':
        create_or_update_dynamo_table(connection, module)
    elif state == 'absent':
        delete_dynamo_table(connection, module)
diff --git a/lib/ansible/modules/cloud/amazon/ec2_ami_copy.py b/lib/ansible/modules/cloud/amazon/ec2_ami_copy.py
new file mode 100644
index 0000000000..71b3c611a8
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_ami_copy.py
@@ -0,0 +1,259 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_ami_copy
+short_description: copies AMI between AWS regions, return new image id
+description:
+ - Copies AMI from a source region to a destination region. This module has a dependency on python-boto >= 2.5
+version_added: "2.0"
+options:
+ source_region:
+ description:
+ - the source region that AMI should be copied from
+ required: true
+ source_image_id:
+ description:
+ - the id of the image in source region that should be copied
+ required: true
+ name:
+ description:
+ - The name of the new image to copy
+ required: false
+ default: null
+ description:
+ description:
+ - An optional human-readable string describing the contents and purpose of the new AMI.
+ required: false
+ default: null
+ encrypted:
+ description:
+ - Whether or not to encrypt the target image
+ required: false
+ default: null
+ version_added: "2.2"
+ kms_key_id:
+ description:
+ - KMS key id used to encrypt image. If not specified, uses default EBS Customer Master Key (CMK) for your account.
+ required: false
+ default: null
+ version_added: "2.2"
+ wait:
+ description:
+ - wait for the copied AMI to be in state 'available' before returning.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ required: false
+ default: 1200
+ tags:
+ description:
+ - a hash/dictionary of tags to add to the new copied AMI; '{"key":"value"}' and '{"key":"value","key":"value"}'
+ required: false
+ default: null
+
+author: Amir Moulavi <amir.moulavi@gmail.com>
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Basic AMI Copy
+- ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+
+# AMI copy wait until available
+- ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ wait: yes
+ register: image_id
+
+# Named AMI copy
+- ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ name: My-Awesome-AMI
+ description: latest patch
+
+# Tagged AMI copy
+- ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ tags:
+ Name: My-Super-AMI
+ Patch: 1.2.3
+
+# Encrypted AMI copy
+- ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ encrypted: yes
+
+# Encrypted AMI copy with specified key
+- ec2_ami_copy:
+ source_region: us-east-1
+ region: eu-west-1
+ source_image_id: ami-xxxxxxx
+ encrypted: yes
+ kms_key_id: arn:aws:kms:us-east-1:XXXXXXXXXXXX:key/746de6ea-50a4-4bcb-8fbc-e3b29f2d367b
+'''
+
+import time
+
+try:
+ import boto
+ import boto.ec2
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import ec2_argument_spec, ec2_connect, get_aws_connection_info
+
+
def copy_image(module, ec2):
    """Copy the source AMI into the connected (destination) region.

    module : AnsibleModule object
    ec2: authenticated ec2 connection object for the destination region
    """
    p = module.params
    tags = p.get('tags')
    wait = p.get('wait')
    wait_timeout = int(p.get('wait_timeout'))

    copy_args = dict(
        source_region=p.get('source_region'),
        source_image_id=p.get('source_image_id'),
        name=p.get('name'),
        description=p.get('description'),
        encrypted=p.get('encrypted'),
        kms_key_id=p.get('kms_key_id'),
    )

    try:
        image_id = ec2.copy_image(**copy_args).image_id
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    # First wait for the API to acknowledge the new AMI id, then for the
    # copy itself to reach the 'available' state.
    img = wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait)
    img = wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait)

    register_tags_if_any(module, ec2, tags, image_id)

    module.exit_json(msg="AMI copy operation complete", image_id=image_id, state=img.state, changed=True)
+
+
def register_tags_if_any(module, ec2, tags, image_id):
    """Apply the given tag dict to the copied AMI; no-op when tags is empty."""
    if not tags:
        return
    try:
        ec2.create_tags([image_id], tags)
    except Exception as e:
        module.fail_json(msg=str(e))
+
+
def wait_until_image_is_copied(module, ec2, wait_timeout, img, image_id, wait):
    """Poll until the copied AMI reaches the 'available' state.

    Returns immediately (with the current img) when wait is falsy; fails the
    module when the deadline passes before the image becomes available.
    """
    deadline = time.time() + wait_timeout
    if wait:
        while deadline > time.time() and (img is None or img.state != 'available'):
            img = ec2.get_image(image_id)
            time.sleep(3)
        if deadline <= time.time():
            # waiting took too long
            module.fail_json(msg="timed out waiting for image to be copied")
    return img
+
+
def wait_until_image_is_recognized(module, ec2, wait_timeout, image_id, wait):
    """Poll (once per second, up to wait_timeout attempts) until the EC2 API
    recognizes the freshly registered AMI id, then return the image object."""
    attempts = 0
    while attempts < wait_timeout:
        attempts += 1
        try:
            return ec2.get_image(image_id)
        except boto.exception.EC2ResponseError as e:
            # NotFound is expected right after registering the copy with the EC2 API.
            if wait and 'InvalidAMIID.NotFound' in e.error_code:
                time.sleep(1)
            else:
                # On any other exception we should fail
                module.fail_json(
                    msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help: " + str(
                        e))
    module.fail_json(msg="timed out waiting for image to be recognized")
+
+
def main():
    """Module entry point: connect to the destination region and copy the AMI."""
    spec = ec2_argument_spec()
    spec.update(dict(
        source_region=dict(required=True),
        source_image_id=dict(required=True),
        name=dict(),
        description=dict(default=""),
        encrypted=dict(type='bool', required=False),
        kms_key_id=dict(type='str', required=False),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(default=1200),
        tags=dict(type='dict')))

    module = AnsibleModule(argument_spec=spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    # Both credential lookups fail the module with the exception text, so they
    # can share a single handler.
    try:
        ec2 = ec2_connect(module)
        region, ec2_url, boto_params = get_aws_connection_info(module)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg=str(e))

    if not region:
        module.fail_json(msg="region must be specified")

    copy_image(module, ec2)


if __name__ == '__main__':
    main()
+
diff --git a/lib/ansible/modules/cloud/amazon/ec2_asg_facts.py b/lib/ansible/modules/cloud/amazon/ec2_asg_facts.py
new file mode 100644
index 0000000000..3cd6e67860
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_asg_facts.py
@@ -0,0 +1,359 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_asg_facts
+short_description: Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
+description:
+ - Gather facts about ec2 Auto Scaling Groups (ASGs) in AWS
+version_added: "2.2"
+author: "Rob White (@wimnat)"
+options:
+ name:
+ description:
+ - The prefix or name of the auto scaling group(s) you are searching for.
+ - "Note: This is a regular expression match with implicit '^' (beginning of string). Append '$' for a complete name match."
+ required: false
+ tags:
+ description:
+ - "A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling group(s) you are searching for."
+ required: false
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Find all groups
+- ec2_asg_facts:
+ register: asgs
+
+# Find a group with matching name/prefix
+- ec2_asg_facts:
+ name: public-webserver-asg
+ register: asgs
+
+# Find a group with matching tags
+- ec2_asg_facts:
+ tags:
+ project: webapp
+ env: production
+ register: asgs
+
+# Find a group with matching name/prefix and tags
+- ec2_asg_facts:
+ name: myproject
+ tags:
+ env: production
+ register: asgs
+
+# Fail if no groups are found
+- ec2_asg_facts:
+ name: public-webserver-asg
+ register: asgs
+ failed_when: "{{ asgs.results | length == 0 }}"
+
+# Fail if more than 1 group is found
+- ec2_asg_facts:
+ name: public-webserver-asg
+ register: asgs
+ failed_when: "{{ asgs.results | length > 1 }}"
+'''
+
+RETURN = '''
+---
+auto_scaling_group_arn:
+ description: The Amazon Resource Name of the ASG
+ returned: success
+ type: string
+ sample: "arn:aws:autoscaling:us-west-2:1234567890:autoScalingGroup:10787c52-0bcb-427d-82ba-c8e4b008ed2e:autoScalingGroupName/public-webapp-production-1"
+auto_scaling_group_name:
+ description: Name of autoscaling group
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+availability_zones:
+ description: List of Availability Zones that are enabled for this ASG.
+ returned: success
+ type: list
+  sample: ["us-west-2a", "us-west-2b", "us-west-2c"]
+created_time:
+ description: The date and time this ASG was created, in ISO 8601 format.
+ returned: success
+ type: string
+ sample: "2015-11-25T00:05:36.309Z"
+default_cooldown:
+ description: The default cooldown time in seconds.
+ returned: success
+ type: int
+ sample: 300
+desired_capacity:
+ description: The number of EC2 instances that should be running in this group.
+ returned: success
+ type: int
+ sample: 3
+health_check_period:
+ description: Length of time in seconds after a new EC2 instance comes into service that Auto Scaling starts checking its health.
+ returned: success
+ type: int
+ sample: 30
+health_check_type:
+ description: The service you want the health status from, one of "EC2" or "ELB".
+ returned: success
+ type: str
+ sample: "ELB"
+instances:
+ description: List of EC2 instances and their status as it relates to the ASG.
+ returned: success
+ type: list
+ sample: [
+ {
+ "availability_zone": "us-west-2a",
+ "health_status": "Healthy",
+ "instance_id": "i-es22ad25",
+ "launch_configuration_name": "public-webapp-production-1",
+ "lifecycle_state": "InService",
+ "protected_from_scale_in": "false"
+ }
+ ]
+launch_configuration_name:
+ description: Name of launch configuration associated with the ASG.
+ returned: success
+ type: str
+ sample: "public-webapp-production-1"
+load_balancer_names:
+ description: List of load balancers names attached to the ASG.
+ returned: success
+ type: list
+ sample: ["elb-webapp-prod"]
+max_size:
+ description: Maximum size of group
+ returned: success
+ type: int
+ sample: 3
+min_size:
+ description: Minimum size of group
+ returned: success
+ type: int
+ sample: 1
+new_instances_protected_from_scale_in:
+  description: Whether or not new instances are protected from automatic scale-in.
+ returned: success
+ type: boolean
+ sample: "false"
+placement_group:
+ description: Placement group into which instances are launched, if any.
+ returned: success
+ type: str
+ sample: None
+status:
+ description: The current state of the group when DeleteAutoScalingGroup is in progress.
+ returned: success
+ type: str
+ sample: None
+tags:
+ description: List of tags for the ASG, and whether or not each tag propagates to instances at launch.
+ returned: success
+ type: list
+ sample: [
+ {
+ "key": "Name",
+ "value": "public-webapp-production-1",
+ "resource_id": "public-webapp-production-1",
+ "resource_type": "auto-scaling-group",
+ "propagate_at_launch": "true"
+ },
+ {
+ "key": "env",
+ "value": "production",
+ "resource_id": "public-webapp-production-1",
+ "resource_type": "auto-scaling-group",
+ "propagate_at_launch": "true"
+ }
+ ]
+termination_policies:
+ description: A list of termination policies for the group.
+ returned: success
+ type: str
+ sample: ["Default"]
+'''
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
def match_asg_tags(tags_to_match, asg):
    """Return True when every key/value pair in tags_to_match is present on the ASG.

    tags_to_match (dict): tag name -> tag value; all pairs must match.
    asg (dict): raw describe_auto_scaling_groups entry with a 'Tags' list of
        {'Key': ..., 'Value': ...} dicts.
    """
    # .items() instead of .iteritems(): the latter is Python 2 only and raises
    # AttributeError under Python 3.
    for key, value in tags_to_match.items():
        for tag in asg['Tags']:
            if key == tag['Key'] and value == tag['Value']:
                break
        else:
            return False
    return True
+
def find_asgs(conn, module, name=None, tags=None):
    """Return the list of Auto Scaling Groups matching the given filters.

    Args:
        conn (boto3.AutoScaling.Client): Valid Boto3 ASG client.
        module (AnsibleModule): Used only to fail the module on API errors.
        name (str): Optional name of the ASG you are looking for. Treated as a
            regular expression implicitly anchored at the start of the group name.
        tags (dict): Optional dictionary of tags and values that must all be
            present on a group for it to match.

    Returns:
        list: One snake_cased dict (via camel_dict_to_snake_dict) per matching
        AutoScalingGroup, with keys such as auto_scaling_group_name,
        availability_zones, instances, tags, termination_policies, ...
    """

    # NOTE(review): describe_auto_scaling_groups is not paginated here, so
    # groups beyond the first page of results are silently dropped — confirm
    # whether a paginator is needed for large accounts.
    try:
        asgs = conn.describe_auto_scaling_groups()
    except ClientError as e:
        # str(e), not e.message: botocore's ClientError has no .message
        # attribute on Python 3.
        module.fail_json(msg=str(e), **camel_dict_to_snake_dict(e.response))

    matched_asgs = []

    if name is not None:
        # Compile the prefix regex once, only when a name filter was supplied.
        name_prog = re.compile(r'^' + name)

    for asg in asgs['AutoScalingGroups']:
        if name:
            matched_name = name_prog.search(asg['AutoScalingGroupName'])
        else:
            matched_name = True

        if tags:
            matched_tags = match_asg_tags(tags, asg)
        else:
            matched_tags = True

        if matched_name and matched_tags:
            matched_asgs.append(camel_dict_to_snake_dict(asg))

    return matched_asgs
+
+
def main():
    """Gather facts about matching Auto Scaling Groups and exit with results."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str'),
            tags=dict(type='dict'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    asg_name = module.params.get('name')
    asg_tags = module.params.get('tags')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        autoscaling = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except ClientError as e:
        # str(e), not e.message: botocore's ClientError has no .message
        # attribute on Python 3.
        module.fail_json(msg=str(e), **camel_dict_to_snake_dict(e.response))

    results = find_asgs(autoscaling, module, name=asg_name, tags=asg_tags)
    module.exit_json(results=results)

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_customer_gateway.py b/lib/ansible/modules/cloud/amazon/ec2_customer_gateway.py
new file mode 100644
index 0000000000..a8a74926cd
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_customer_gateway.py
@@ -0,0 +1,271 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_customer_gateway
+short_description: Manage an AWS customer gateway
+description:
+ - Manage an AWS customer gateway
+version_added: "2.2"
+author: Michael Baydoun (@MichaelBaydoun)
+requirements: [ botocore, boto3 ]
+notes:
+ - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.
+ - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use
+ customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.
+options:
+ bgp_asn:
+ description:
+ - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when state=present.
+ required: false
+ default: null
+ ip_address:
+ description:
+      - Internet-routable IP address for the customer gateway; it must be a static address.
+ required: true
+ name:
+ description:
+ - Name of the customer gateway.
+ required: true
+ state:
+ description:
+ - Create or terminate the Customer Gateway.
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+
+# Create Customer Gateway
+- ec2_customer_gateway:
+ bgp_asn: 12345
+ ip_address: 1.2.3.4
+ name: IndianapolisOffice
+ region: us-east-1
+ register: cgw
+
+# Delete Customer Gateway
+- ec2_customer_gateway:
+ ip_address: 1.2.3.4
+ name: IndianapolisOffice
+ state: absent
+ region: us-east-1
+ register: cgw
+'''
+
+RETURN = '''
+gateway.customer_gateways:
+ description: details about the gateway that was created.
+ returned: success
+ type: complex
+ contains:
+ bgp_asn:
+ description: The Border Gateway Autonomous System Number.
+ returned: when exists and gateway is available.
+ sample: 65123
+ type: string
+ customer_gateway_id:
+ description: gateway id assigned by amazon.
+ returned: when exists and gateway is available.
+ sample: cgw-cb6386a2
+ type: string
+ ip_address:
+ description: ip address of your gateway device.
+ returned: when exists and gateway is available.
+ sample: 1.2.3.4
+ type: string
+ state:
+ description: state of gateway.
+ returned: when gateway exists and is available.
+      sample: available
+ type: string
+ tags:
+ description: any tags on the gateway.
+ returned: when gateway exists and is available, and when tags exist.
+      sample: available
+ type: string
+ type:
+ description: encryption type.
+ returned: when gateway exists and is available.
+ sample: ipsec.1
+ type: string
+'''
+
+try:
+ from botocore.exceptions import ClientError
+ HAS_BOTOCORE = True
+except ImportError:
+ HAS_BOTOCORE = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import (boto3_conn, camel_dict_to_snake_dict,
+ ec2_argument_spec, get_aws_connection_info)
+
+
class Ec2CustomerGatewayManager:
    """Thin wrapper around the boto3 EC2 client for customer gateway operations."""

    def __init__(self, module):
        self.module = module

        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            if not region:
                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
            self.ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except ClientError as e:
            # str(e), not e.message: botocore's ClientError has no .message
            # attribute on Python 3.
            module.fail_json(msg=str(e))

    def ensure_cgw_absent(self, gw_id):
        """Delete the customer gateway with the given id; returns the raw API response."""
        response = self.ec2.delete_customer_gateway(
            DryRun=False,
            CustomerGatewayId=gw_id
        )
        return response

    def ensure_cgw_present(self, bgp_asn, ip_address):
        """Create an ipsec.1 customer gateway; returns the raw API response."""
        response = self.ec2.create_customer_gateway(
            DryRun=False,
            Type='ipsec.1',
            PublicIp=ip_address,
            BgpAsn=bgp_asn,
        )
        return response

    def tag_cgw_name(self, gw_id, name):
        """Set the 'Name' tag on the gateway; returns the raw API response."""
        response = self.ec2.create_tags(
            DryRun=False,
            Resources=[
                gw_id,
            ],
            Tags=[
                {
                    'Key': 'Name',
                    'Value': name
                },
            ]
        )
        return response

    def describe_gateways(self, ip_address):
        """Return customer gateways in state 'available' matching the given public IP."""
        response = self.ec2.describe_customer_gateways(
            DryRun=False,
            Filters=[
                {
                    'Name': 'state',
                    'Values': [
                        'available',
                    ]
                },
                {
                    'Name': 'ip-address',
                    'Values': [
                        ip_address,
                    ]
                }
            ]
        )
        return response
+
+
def main():
    """Create, rename or delete a customer gateway based on module params."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            bgp_asn=dict(required=False, type='int'),
            ip_address=dict(required=True),
            name=dict(required=True),
            state=dict(default='present', choices=['present', 'absent']),
        )
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_if=[
                               ('state', 'present', ['bgp_asn'])
                           ]
                           )

    if not HAS_BOTOCORE:
        module.fail_json(msg='botocore is required.')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    gw_mgr = Ec2CustomerGatewayManager(module)

    name = module.params.get('name')

    existing = gw_mgr.describe_gateways(module.params['ip_address'])
    # describe_gateways returns a key of CustomerGateways where as create_gateway returns a
    # key of CustomerGateway. For consistency, change it here
    existing['CustomerGateway'] = existing['CustomerGateways']

    results = dict(changed=False)
    if module.params['state'] == 'present':
        if existing['CustomerGateway']:
            results['gateway'] = existing
            if existing['CustomerGateway'][0]['Tags']:
                tag_array = existing['CustomerGateway'][0]['Tags']
                for tag in tag_array:
                    if tag['Key'] == 'Name':
                        current_name = tag['Value']
                        if current_name != name:
                            results['changed'] = True
                            # Honor check mode: report the rename without performing it.
                            if not module.check_mode:
                                results['name'] = gw_mgr.tag_cgw_name(
                                    results['gateway']['CustomerGateway'][0]['CustomerGatewayId'],
                                    module.params['name'],
                                )
        else:
            # changed is reported even in check mode, consistent with the
            # 'absent' branch below.
            results['changed'] = True
            if not module.check_mode:
                results['gateway'] = gw_mgr.ensure_cgw_present(
                    module.params['bgp_asn'],
                    module.params['ip_address'],
                )
                results['name'] = gw_mgr.tag_cgw_name(
                    results['gateway']['CustomerGateway']['CustomerGatewayId'],
                    module.params['name'],
                )

    elif module.params['state'] == 'absent':
        if existing['CustomerGateway']:
            results['gateway'] = existing
            results['changed'] = True
            if not module.check_mode:
                results['gateway'] = gw_mgr.ensure_cgw_absent(
                    existing['CustomerGateway'][0]['CustomerGatewayId']
                )

    pretty_results = camel_dict_to_snake_dict(results)
    module.exit_json(**pretty_results)
diff --git a/lib/ansible/modules/cloud/amazon/ec2_elb_facts.py b/lib/ansible/modules/cloud/amazon/ec2_elb_facts.py
new file mode 100644
index 0000000000..c4857f6a3c
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_elb_facts.py
@@ -0,0 +1,253 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_elb_facts
+short_description: Gather facts about EC2 Elastic Load Balancers in AWS
+description:
+ - Gather facts about EC2 Elastic Load Balancers in AWS
+version_added: "2.0"
+author:
+ - "Michael Schultz (github.com/mjschultz)"
+ - "Fernando Jose Pando (@nand0p)"
+options:
+ names:
+ description:
+ - List of ELB names to gather facts about. Pass this option to gather facts about a set of ELBs, otherwise, all ELBs are returned.
+ required: false
+ default: null
+ aliases: ['elb_ids', 'ec2_elbs']
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+# Output format tries to match ec2_elb_lb module input parameters
+
+# Gather facts about all ELBs
+- action:
+ module: ec2_elb_facts
+ register: elb_facts
+
+- action:
+ module: debug
+ msg: "{{ item.dns_name }}"
+ with_items: "{{ elb_facts.elbs }}"
+
+# Gather facts about a particular ELB
+- action:
+ module: ec2_elb_facts
+ names: frontend-prod-elb
+ register: elb_facts
+
+- action:
+ module: debug
+ msg: "{{ elb_facts.elbs.0.dns_name }}"
+
+# Gather facts about a set of ELBs
+- action:
+ module: ec2_elb_facts
+ names:
+ - frontend-prod-elb
+ - backend-prod-elb
+ register: elb_facts
+
+- action:
+ module: debug
+ msg: "{{ item.dns_name }}"
+ with_items: "{{ elb_facts.elbs }}"
+
+'''
+
+try:
+ import boto.ec2.elb
+ from boto.ec2.tag import Tag
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
class ElbInformation(object):
    """Gathers fact dictionaries for classic Elastic Load Balancers."""

    def __init__(self,
                 module,
                 names,
                 region,
                 **aws_connect_params):

        self.module = module
        self.names = names
        self.region = region
        self.aws_connect_params = aws_connect_params
        self.connection = self._get_elb_connection()

    def _get_tags(self, elbname):
        """Best-effort fetch of the ELB's tags; returns {} on any API failure."""
        params = {'LoadBalancerNames.member.1': elbname}
        try:
            elb_tags = self.connection.get_list('DescribeTags', params, [('member', Tag)])
            return dict((tag.Key, tag.Value) for tag in elb_tags if hasattr(tag, 'Key'))
        except Exception:
            # Narrowed from a bare 'except:' — still best-effort, but no longer
            # swallows SystemExit/KeyboardInterrupt.
            return {}

    def _get_elb_connection(self):
        """Open an ELB connection in self.region, failing the module on auth errors."""
        try:
            return connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
        except BotoServerError as err:
            self.module.fail_json(msg=err.message)

    def _get_elb_listeners(self, listeners):
        """Convert boto listener tuples to dicts; index 4 (when set) is the SSL cert id."""
        listener_list = []

        for listener in listeners:
            listener_dict = {
                'load_balancer_port': listener[0],
                'instance_port': listener[1],
                'protocol': listener[2],
            }

            try:
                ssl_certificate_id = listener[4]
            except IndexError:
                pass
            else:
                if ssl_certificate_id:
                    listener_dict['ssl_certificate_id'] = ssl_certificate_id

            listener_list.append(listener_dict)

        return listener_list

    def _get_health_check(self, health_check):
        """Parse a boto HealthCheck target like 'HTTP:80/index.html' into a dict."""
        protocol, port_path = health_check.target.split(':')
        try:
            port, path = port_path.split('/', 1)
            path = '/{}'.format(path)
        except ValueError:
            # No path component (e.g. 'TCP:22').
            port = port_path
            path = None

        health_check_dict = {
            'ping_protocol': protocol.lower(),
            'ping_port': int(port),
            'response_timeout': health_check.timeout,
            'interval': health_check.interval,
            'unhealthy_threshold': health_check.unhealthy_threshold,
            'healthy_threshold': health_check.healthy_threshold,
        }

        if path:
            health_check_dict['ping_path'] = path
        return health_check_dict

    def _get_elb_info(self, elb):
        """Build the fact dictionary for a single load balancer."""
        elb_info = {
            'name': elb.name,
            'zones': elb.availability_zones,
            'dns_name': elb.dns_name,
            'canonical_hosted_zone_name': elb.canonical_hosted_zone_name,
            'canonical_hosted_zone_name_id': elb.canonical_hosted_zone_name_id,
            'hosted_zone_name': elb.canonical_hosted_zone_name,
            'hosted_zone_id': elb.canonical_hosted_zone_name_id,
            'instances': [instance.id for instance in elb.instances],
            'listeners': self._get_elb_listeners(elb.listeners),
            'scheme': elb.scheme,
            'security_groups': elb.security_groups,
            'health_check': self._get_health_check(elb.health_check),
            'subnets': elb.subnets,
            'instances_inservice': [],
            'instances_inservice_count': 0,
            'instances_outofservice': [],
            'instances_outofservice_count': 0,
            'instances_inservice_percent': 0.0,
            'tags': self._get_tags(elb.name)
        }

        if elb.vpc_id:
            elb_info['vpc_id'] = elb.vpc_id

        if elb.instances:
            try:
                instance_health = self.connection.describe_instance_health(elb.name)
            except BotoServerError as err:
                self.module.fail_json(msg=err.message)
            elb_info['instances_inservice'] = [inst.instance_id for inst in instance_health if inst.state == 'InService']
            elb_info['instances_inservice_count'] = len(elb_info['instances_inservice'])
            elb_info['instances_outofservice'] = [inst.instance_id for inst in instance_health if inst.state == 'OutOfService']
            elb_info['instances_outofservice_count'] = len(elb_info['instances_outofservice'])
            total = float(elb_info['instances_inservice_count'] + elb_info['instances_outofservice_count'])
            # Guard against ZeroDivisionError when every instance is in some
            # other state (e.g. 'Unknown'); the percentage stays at 0.0.
            if total:
                elb_info['instances_inservice_percent'] = float(elb_info['instances_inservice_count']) / total * 100
        return elb_info

    def list_elbs(self):
        """Return fact dicts for all ELBs, or only those named in self.names."""
        elb_array = []

        try:
            all_elbs = self.connection.get_all_load_balancers()
        except BotoServerError as err:
            self.module.fail_json(msg="%s: %s" % (err.error_code, err.error_message))

        if all_elbs:
            if self.names:
                for existing_lb in all_elbs:
                    if existing_lb.name in self.names:
                        elb_array.append(existing_lb)
            else:
                elb_array = all_elbs

        return list(map(self._get_elb_info, elb_array))
+
def main():
    """Module entry point: gather facts about classic ELBs and exit."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(names={'default': [], 'type': 'list'}))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="region must be specified")

    elb_information = ElbInformation(module,
                                     module.params['names'],
                                     region,
                                     **aws_connect_params)

    module.exit_json(changed=False, elbs=elb_information.list_elbs())

from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_eni.py b/lib/ansible/modules/cloud/amazon/ec2_eni.py
new file mode 100644
index 0000000000..aca78a459d
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_eni.py
@@ -0,0 +1,576 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_eni
+short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
+description:
+ - Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID or private_ip is \
+ provided, the existing ENI (if any) will be modified. The 'attached' parameter controls the attachment status \
+ of the network interface.
+version_added: "2.0"
+author: "Rob White (@wimnat)"
+options:
+ eni_id:
+ description:
+ - The ID of the ENI
+ required: false
+ default: null
+ instance_id:
+ description:
+ - Instance ID that you wish to attach ENI to. Since version 2.2, use the 'attached' parameter to attach or \
+ detach an ENI. Prior to 2.2, to detach an ENI from an instance, use 'None'.
+ required: false
+ default: null
+ private_ip_address:
+ description:
+ - Private IP address.
+ required: false
+ default: null
+ subnet_id:
+ description:
+ - ID of subnet in which to create the ENI. Only required when state=present.
+ required: true
+ description:
+ description:
+ - Optional description of the ENI.
+ required: false
+ default: null
+ security_groups:
+ description:
+ - List of security groups associated with the interface. Only used when state=present. Since version 2.2, you \
+ can specify security groups by ID or by name or a combination of both. Prior to 2.2, you can specify only by ID.
+ required: false
+ default: null
+ state:
+ description:
+ - Create or delete ENI
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ device_index:
+ description:
+ - The index of the device for the network interface attachment on the instance.
+ required: false
+ default: 0
+ attached:
+ description:
+      - Specifies if network interface should be attached or detached from instance. If omitted, attachment status \
+ won't change
+ required: false
+ default: yes
+ version_added: 2.2
+ force_detach:
+ description:
+ - Force detachment of the interface. This applies either when explicitly detaching the interface by setting instance_id to None or when deleting an interface with state=absent.
+ required: false
+ default: no
+ delete_on_termination:
+ description:
+ - Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the interface is being modified, not on creation.
+ required: false
+ source_dest_check:
+ description:
+ - By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled. You can only specify this flag when the interface is being modified, not on creation.
+ required: false
+ secondary_private_ip_addresses:
+ description:
+ - A list of IP addresses to assign as secondary IP addresses to the network interface. This option is mutually exclusive of secondary_private_ip_address_count
+ required: false
+ version_added: 2.2
+ secondary_private_ip_address_count:
+ description:
+ - The number of secondary IP addresses to assign to the network interface. This option is mutually exclusive of secondary_private_ip_addresses
+ required: false
+ version_added: 2.2
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create an ENI. As no security group is defined, ENI will be created in default security group
+- ec2_eni:
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+
+# Create an ENI and attach it to an instance
+- ec2_eni:
+ instance_id: i-xxxxxxx
+ device_index: 1
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+
+# Create an ENI with two secondary addresses
+- ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ state: present
+ secondary_private_ip_address_count: 2
+
+# Assign a secondary IP address to an existing ENI
+# This will purge any existing IPs
+- ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ eni_id: eni-yyyyyyyy
+ state: present
+ secondary_private_ip_addresses:
+ - 172.16.1.1
+
+# Remove any secondary IP addresses from an existing ENI
+- ec2_eni:
+ subnet_id: subnet-xxxxxxxx
+ eni_id: eni-yyyyyyyy
+ state: present
+ secondary_private_ip_addresses:
+ -
+
+# Destroy an ENI, detaching it from any instance if necessary
+- ec2_eni:
+ eni_id: eni-xxxxxxx
+ force_detach: yes
+ state: absent
+
+# Update an ENI
+- ec2_eni:
+ eni_id: eni-xxxxxxx
+ description: "My new description"
+ state: present
+
+# Detach an ENI from an instance
+- ec2_eni:
+ eni_id: eni-xxxxxxx
+ instance_id: None
+ state: present
+
+### Delete an interface on termination
+# First create the interface
+- ec2_eni:
+ instance_id: i-xxxxxxx
+ device_index: 1
+ private_ip_address: 172.31.0.20
+ subnet_id: subnet-xxxxxxxx
+ state: present
+ register: eni
+
+# Modify the interface to enable the delete_on_termination flag
+- ec2_eni:
+ eni_id: {{ "eni.interface.id" }}
+ delete_on_termination: true
+
+'''
+
+
+RETURN = '''
+interface:
+ description: Network interface attributes
+ returned: when state != absent
+ type: dictionary
+ contains:
+ description:
+ description: interface description
+ type: string
+ sample: Firewall network interface
+ groups:
+ description: list of security groups
+ type: list of dictionaries
+ sample: [ { "sg-f8a8a9da": "default" } ]
+ id:
+ description: network interface id
+ type: string
+ sample: "eni-1d889198"
+ mac_address:
+ description: interface's physical address
+ type: string
+ sample: "00:00:5E:00:53:23"
+ owner_id:
+ description: aws account id
+ type: string
+ sample: 812381371
+ private_ip_address:
+ description: primary ip address of this interface
+ type: string
+ sample: 10.20.30.40
+ private_ip_addresses:
+ description: list of all private ip addresses associated to this interface
+ type: list of dictionaries
+ sample: [ { "primary_address": true, "private_ip_address": "10.20.30.40" } ]
+ source_dest_check:
+ description: value of source/dest check flag
+ type: boolean
+ sample: True
+ status:
+ description: network interface status
+ type: string
+ sample: "pending"
+ subnet_id:
+ description: which vpc subnet the interface is bound
+ type: string
+ sample: subnet-b0a0393c
+ vpc_id:
+ description: which vpc this network interface is bound
+ type: string
+ sample: vpc-9a9a9da
+
+'''
+
+import time
+import re
+
+try:
+ import boto.ec2
+ import boto.vpc
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import (AnsibleAWSError, connect_to_aws,
+ ec2_argument_spec, get_aws_connection_info,
+ get_ec2_security_group_ids_from_names)
+
+
def get_eni_info(interface):
    """Translate a boto NetworkInterface object into a plain facts dict."""
    # Collect every private IP bound to the interface, flagging the primary.
    addresses = []
    for ip_entry in interface.private_ip_addresses:
        addresses.append({
            'private_ip_address': ip_entry.private_ip_address,
            'primary_address': ip_entry.primary,
        })

    info = {
        'id': interface.id,
        'subnet_id': interface.subnet_id,
        'vpc_id': interface.vpc_id,
        'description': interface.description,
        'owner_id': interface.owner_id,
        'status': interface.status,
        'mac_address': interface.mac_address,
        'private_ip_address': interface.private_ip_address,
        'source_dest_check': interface.source_dest_check,
        'groups': {group.id: group.name for group in interface.groups},
        'private_ip_addresses': addresses,
    }

    attachment = interface.attachment
    if attachment is not None:
        info['attachment'] = {
            'attachment_id': attachment.id,
            'instance_id': attachment.instance_id,
            'device_index': attachment.device_index,
            'status': attachment.status,
            'attach_time': attachment.attach_time,
            'delete_on_termination': attachment.delete_on_termination,
        }

    return info
+
+
def wait_for_eni(eni, status):
    """Poll *eni* every 3 seconds until it reaches the requested *status*.

    status is "attached" or "detached".
    NOTE(review): there is no timeout — if AWS never reaches the requested
    state this loops forever; confirm whether an upper bound is wanted.
    """
    while True:
        time.sleep(3)
        eni.update()
        attachment = eni.attachment
        if attachment is None:
            # With no attachment record, only "detached" can be satisfied.
            if status == "detached":
                return
        elif status == "attached" and attachment.status == "attached":
            return
+
+
def create_eni(connection, vpc_id, module):
    """Create a new ENI (with optional attach and secondary IPs) and exit.

    Looks up an existing ENI first via find_eni(); when one is found nothing
    is created and the module exits with changed=False. Any failure while
    attaching or assigning secondary addresses deletes the half-created
    interface before re-raising, so a failed run does not leak an ENI.
    Always terminates the module via exit_json()/fail_json().
    """
    instance_id = module.params.get("instance_id")
    attached = module.params.get("attached")
    # Legacy calling convention: the literal string 'None' means "no instance".
    if instance_id == 'None':
        instance_id = None
    device_index = module.params.get("device_index")
    subnet_id = module.params.get('subnet_id')
    private_ip_address = module.params.get('private_ip_address')
    description = module.params.get('description')
    # Resolve any group names to IDs so the create call only sees IDs.
    security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection, vpc_id=vpc_id, boto3=False)
    secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
    secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
    changed = False

    try:
        eni = find_eni(connection, module)
        if eni is None:
            eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
            if attached == True and instance_id is not None:
                try:
                    eni.attach(instance_id, device_index)
                except BotoServerError:
                    # Clean up the just-created ENI before surfacing the error.
                    eni.delete()
                    raise
                # Wait to allow creation / attachment to finish
                wait_for_eni(eni, "attached")
                eni.update()

            if secondary_private_ip_address_count is not None:
                try:
                    connection.assign_private_ip_addresses(network_interface_id=eni.id, secondary_private_ip_address_count=secondary_private_ip_address_count)
                except BotoServerError:
                    eni.delete()
                    raise

            if secondary_private_ip_addresses is not None:
                try:
                    connection.assign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=secondary_private_ip_addresses)
                except BotoServerError:
                    eni.delete()
                    raise

            # Only a freshly created interface counts as a change.
            changed = True

    except BotoServerError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed, interface=get_eni_info(eni))
+
+
def modify_eni(connection, vpc_id, module, eni):
    """Bring an existing ENI in line with the module parameters and exit.

    Compares each requested attribute (description, security groups,
    source/dest check, delete-on-termination, secondary IPs, attachment)
    against the interface's current state, applies only the differences,
    and exits the module reporting whether anything changed.

    Fixes over the previous revision:
    - 'changed' is now reported when secondary private IPs are assigned or
      unassigned (it was silently left False before).
    - secondary_private_ip_addresses handling is idempotent: addresses are
      only (un)assigned when the requested set actually differs.
    - re-attaching to a different instance no longer goes through
      detach_eni(), which calls module.exit_json() and therefore skipped
      the re-attach entirely.
    """
    instance_id = module.params.get("instance_id")
    attached = module.params.get("attached")
    device_index = module.params.get("device_index")
    description = module.params.get('description')
    security_groups = module.params.get('security_groups')
    source_dest_check = module.params.get("source_dest_check")
    delete_on_termination = module.params.get("delete_on_termination")
    secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
    secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
    changed = False

    try:
        if description is not None and eni.description != description:
            connection.modify_network_interface_attribute(eni.id, "description", description)
            changed = True
        if security_groups:
            groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=vpc_id, boto3=False)
            if sorted(get_sec_group_list(eni.groups)) != sorted(groups):
                connection.modify_network_interface_attribute(eni.id, "groupSet", groups)
                changed = True
        if source_dest_check is not None and eni.source_dest_check != source_dest_check:
            connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check)
            changed = True
        if delete_on_termination is not None and eni.attachment is not None:
            if eni.attachment.delete_on_termination != delete_on_termination:
                connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id)
                changed = True

        current_secondary_addresses = [ip.private_ip_address for ip in eni.private_ip_addresses if not ip.primary]

        if secondary_private_ip_addresses is not None:
            # Converge to exactly the requested set of secondary addresses.
            addresses_to_remove = set(current_secondary_addresses) - set(secondary_private_ip_addresses)
            if addresses_to_remove:
                connection.unassign_private_ip_addresses(network_interface_id=eni.id,
                                                         private_ip_addresses=list(addresses_to_remove),
                                                         dry_run=False)
                changed = True
            addresses_to_add = set(secondary_private_ip_addresses) - set(current_secondary_addresses)
            if addresses_to_add:
                connection.assign_private_ip_addresses(network_interface_id=eni.id,
                                                       private_ip_addresses=list(addresses_to_add),
                                                       secondary_private_ip_address_count=None,
                                                       allow_reassignment=False, dry_run=False)
                changed = True

        if secondary_private_ip_address_count is not None:
            current_secondary_address_count = len(current_secondary_addresses)
            if secondary_private_ip_address_count > current_secondary_address_count:
                # Let AWS pick the additional addresses.
                connection.assign_private_ip_addresses(network_interface_id=eni.id,
                                                       private_ip_addresses=None,
                                                       secondary_private_ip_address_count=(secondary_private_ip_address_count -
                                                                                           current_secondary_address_count),
                                                       allow_reassignment=False, dry_run=False)
                changed = True
            elif secondary_private_ip_address_count < current_secondary_address_count:
                # Drop enough of the existing secondaries to reach the target count.
                secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count
                connection.unassign_private_ip_addresses(network_interface_id=eni.id,
                                                         private_ip_addresses=current_secondary_addresses[:secondary_addresses_to_remove_count],
                                                         dry_run=False)
                changed = True

        if attached is True:
            if eni.attachment and eni.attachment.instance_id != instance_id:
                # Detach inline rather than via detach_eni(): that helper calls
                # module.exit_json() and would prevent the re-attach below.
                eni.detach(module.params.get("force_detach"))
                wait_for_eni(eni, "detached")
                eni.update()
                changed = True
            if eni.attachment is None:
                eni.attach(instance_id, device_index)
                wait_for_eni(eni, "attached")
                changed = True
        elif attached is False:
            detach_eni(eni, module)

    except BotoServerError as e:
        module.fail_json(msg=e.message)

    eni.update()
    module.exit_json(changed=changed, interface=get_eni_info(eni))
+
+
def delete_eni(connection, module):
    """Delete the ENI named by eni_id, optionally force-detaching it first.

    A lookup failure for the given ID is treated as "already absent"
    (changed=False) rather than an error. Always exits the module.
    """
    eni_id = module.params.get("eni_id")
    force_detach = module.params.get("force_detach")

    try:
        eni = connection.get_all_network_interfaces(eni_id)[0]
        if force_detach is True and eni.attachment is not None:
            eni.detach(force_detach)
            # Let the detachment settle before deleting.
            wait_for_eni(eni, "detached")
            eni.update()
        eni.delete()
        module.exit_json(changed=True)
    except BotoServerError as e:
        if re.search('The networkInterface ID \'.*\' does not exist', e.message) is not None:
            module.exit_json(changed=False)
        else:
            module.fail_json(msg=e.message)
+
+
def detach_eni(eni, module):
    """Detach *eni* from its instance (honouring force_detach) and exit.

    Always terminates the module run via exit_json, reporting whether a
    detach actually happened.
    """
    if eni.attachment is None:
        # Nothing attached: report the current state unchanged.
        module.exit_json(changed=False, interface=get_eni_info(eni))
    else:
        eni.detach(module.params.get("force_detach"))
        wait_for_eni(eni, "detached")
        eni.update()
        module.exit_json(changed=True, interface=get_eni_info(eni))
+
+
def find_eni(connection, module):
    """Locate one ENI by ID and/or filters; return it, or None if not found."""
    params = module.params
    eni_id = params.get("eni_id")
    subnet_id = params.get('subnet_id')
    private_ip_address = params.get('private_ip_address')
    instance_id = params.get('instance_id')
    device_index = params.get('device_index')

    filters = {}
    if subnet_id:
        filters['subnet-id'] = subnet_id
    if private_ip_address:
        filters['private-ip-address'] = private_ip_address
    else:
        # Attachment details only narrow the search when no private IP was given.
        if instance_id:
            filters['attachment.instance-id'] = instance_id
        if device_index:
            filters['attachment.device-index'] = device_index

    try:
        candidates = connection.get_all_network_interfaces(eni_id, filters=filters)
    except BotoServerError as e:
        module.fail_json(msg=e.message)
        return None

    return candidates[0] if candidates else None
+
+
def get_sec_group_list(groups):
    """Return the security group IDs of *groups* as a list of text strings.

    The previous implementation called .encode() on each ID, which returns
    bytes on Python 3; comparing those against str group IDs (as modify_eni
    does) was then always unequal, causing spurious group updates.
    """
    return [str(group.id) for group in groups]
+
+
+def _get_vpc_id(connection, module, subnet_id):
+
+ try:
+ return connection.get_all_subnets(subnet_ids=[subnet_id])[0].vpc_id
+ except BotoServerError as e:
+ module.fail_json(msg=e.message)
+
+
def main():
    """Entry point for ec2_eni: create, modify or delete an ENI per 'state'."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            eni_id=dict(default=None, type='str'),
            instance_id=dict(default=None, type='str'),
            private_ip_address=dict(type='str'),
            subnet_id=dict(type='str'),
            description=dict(type='str'),
            security_groups=dict(default=[], type='list'),
            device_index=dict(default=0, type='int'),
            state=dict(default='present', choices=['present', 'absent']),
            force_detach=dict(default='no', type='bool'),
            source_dest_check=dict(default=None, type='bool'),
            delete_on_termination=dict(default=None, type='bool'),
            secondary_private_ip_addresses=dict(default=None, type='list'),
            secondary_private_ip_address_count=dict(default=None, type='int'),
            attached=dict(default=None, type='bool')
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['secondary_private_ip_addresses', 'secondary_private_ip_address_count']
        ],
        required_if=[
            ('state', 'present', ['subnet_id']),
            ('state', 'absent', ['eni_id']),
            ('attached', True, ['instance_id'])
        ]
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="region must be specified")

    try:
        # Two connections: boto.ec2 for the ENI itself, boto.vpc to resolve
        # the subnet's VPC (needed for security group name lookups).
        connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
        vpc_connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    state = module.params.get("state")

    if state == 'present':
        vpc_id = _get_vpc_id(vpc_connection, module, module.params.get("subnet_id"))
        eni = find_eni(connection, module)
        if eni is None:
            create_eni(connection, vpc_id, module)
        else:
            modify_eni(connection, vpc_id, module, eni)
    elif state == 'absent':
        delete_eni(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_eni_facts.py b/lib/ansible/modules/cloud/amazon/ec2_eni_facts.py
new file mode 100644
index 0000000000..972dcaf4ae
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_eni_facts.py
@@ -0,0 +1,185 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_eni_facts
+short_description: Gather facts about ec2 ENI interfaces in AWS
+description:
+ - Gather facts about ec2 ENI interfaces in AWS
+version_added: "2.0"
+author: "Rob White (@wimnat)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters.
+ required: false
+ default: null
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all ENIs
+- ec2_eni_facts:
+
+# Gather facts about a particular ENI
+- ec2_eni_facts:
+ filters:
+ network-interface-id: eni-xxxxxxx
+
+'''
+
+try:
+ import boto.ec2
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError, NoCredentialsError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import (AnsibleAWSError,
+ ansible_dict_to_boto3_filter_list, boto3_conn,
+ boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
+ connect_to_aws, ec2_argument_spec, get_aws_connection_info)
+
+
def list_ec2_snapshots_boto3(connection, module):
    """Describe network interfaces via boto3 and exit with snake_cased facts.

    NOTE(review): the name is a copy/paste artifact from the snapshots
    module; it is kept because main() calls it under this name.
    """
    if module.params.get("filters") is None:
        filters = []
    else:
        filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))

    try:
        network_interfaces_result = connection.describe_network_interfaces(Filters=filters)
    except (ClientError, NoCredentialsError) as e:
        # botocore exceptions expose no .message attribute on Python 3;
        # str(e) is portable across Python 2 and 3.
        module.fail_json(msg=str(e))

    # Turn the boto3 result in to ansible_friendly_snaked_names
    snaked_network_interfaces_result = camel_dict_to_snake_dict(network_interfaces_result)
    for network_interface in snaked_network_interfaces_result['network_interfaces']:
        network_interface['tag_set'] = boto3_tag_list_to_ansible_dict(network_interface['tag_set'])

    module.exit_json(**snaked_network_interfaces_result)
+
+
def get_eni_info(interface):
    """Build an ansible-friendly facts dict for one boto ENI object."""
    # Every private IP bound to the interface, with its primary flag.
    private_addresses = [
        {'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary}
        for ip in interface.private_ip_addresses
    ]

    interface_info = {
        'id': interface.id,
        'subnet_id': interface.subnet_id,
        'vpc_id': interface.vpc_id,
        'description': interface.description,
        'owner_id': interface.owner_id,
        'status': interface.status,
        'mac_address': interface.mac_address,
        'private_ip_address': interface.private_ip_address,
        'source_dest_check': interface.source_dest_check,
        'groups': {group.id: group.name for group in interface.groups},
        'private_ip_addresses': private_addresses,
    }

    # boto only sets these attributes when the ENI has a public association.
    if hasattr(interface, 'publicDnsName'):
        interface_info['association'] = {
            'public_ip_address': interface.publicIp,
            'public_dns_name': interface.publicDnsName,
            'ip_owner_id': interface.ipOwnerId,
        }

    attachment = interface.attachment
    if attachment is not None:
        interface_info['attachment'] = {
            'attachment_id': attachment.id,
            'instance_id': attachment.instance_id,
            'device_index': attachment.device_index,
            'status': attachment.status,
            'attach_time': attachment.attach_time,
            'delete_on_termination': attachment.delete_on_termination,
        }

    return interface_info
+
+
def list_eni(connection, module):
    """Fetch all ENIs matching the optional filters and exit with their facts."""
    filters = module.params.get("filters")

    try:
        all_eni = connection.get_all_network_interfaces(filters=filters)
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    module.exit_json(interfaces=[get_eni_info(interface) for interface in all_eni])
+
+
def main():
    """Entry point for ec2_eni_facts: prefer boto3, fall back to boto 2."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters=dict(default=None, type='dict')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    if HAS_BOTO3:
        region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
        if not region:
            module.fail_json(msg="region must be specified")
        connection = boto3_conn(module, conn_type='client', resource='ec2',
                                region=region, endpoint=ec2_url, **aws_connect_params)
        list_ec2_snapshots_boto3(connection, module)
    else:
        region, ec2_url, aws_connect_params = get_aws_connection_info(module)
        if not region:
            module.fail_json(msg="region must be specified")
        try:
            connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
        list_eni(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_group_facts.py b/lib/ansible/modules/cloud/amazon/ec2_group_facts.py
new file mode 100644
index 0000000000..ccb4aa64e3
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_group_facts.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_group_facts
+short_description: Gather facts about ec2 security groups in AWS.
+description:
+ - Gather facts about ec2 security groups in AWS.
+version_added: "2.3"
+author: "Henrique Rodrigues (github.com/Sodki)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \
+ U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSecurityGroups.html) for \
+ possible filters. Filter names and values are case sensitive. You can also use underscores (_) \
+ instead of dashes (-) in the filter keys, which will take precedence in case of conflict.
+ required: false
+ default: {}
+notes:
+ - By default, the module will return all security groups. To limit results use the appropriate filters.
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all security groups
+- ec2_group_facts:
+
+# Gather facts about all security groups in a specific VPC
+- ec2_group_facts:
+ filters:
+ vpc-id: vpc-12345678
+
+# Gather facts about all security groups in a specific VPC
+- ec2_group_facts:
+ filters:
+ vpc-id: vpc-12345678
+
+# Gather facts about a security group
+- ec2_group_facts:
+ filters:
+ group-name: example-1
+
+# Gather facts about a security group by id
+- ec2_group_facts:
+ filters:
+ group-id: sg-12345678
+
+# Gather facts about a security group with multiple filters, also mixing the use of underscores as filter keys
+- ec2_group_facts:
+ filters:
+ group_id: sg-12345678
+ vpc-id: vpc-12345678
+
+# Gather facts about various security groups
+- ec2_group_facts:
+ filters:
+ group-name:
+ - example-1
+ - example-2
+ - example-3
+
+# Gather facts about any security group with a tag key Name and value Example. The quotes around 'tag:name' are important because of the colon in the value
+- ec2_group_facts:
+ filters:
+ "tag:Name": Example
+'''
+
+RETURN = '''
+security_groups:
+ description: Security groups that match the provided filters. Each element consists of a dict with all the information related to that security group.
+ type: list
+ sample:
+'''
+
+
try:
    import boto3
    from botocore.exceptions import ClientError
    HAS_BOTO3 = True
except ImportError:
    # Fixed: this previously read "HAS_BOTO3 = Falsentry", raising NameError
    # at import time whenever boto3 was missing instead of letting main()
    # fail with its clear "boto3 required" message.
    HAS_BOTO3 = False
+
+import traceback
+
+
def main():
    """Entry point for ec2_group_facts: describe security groups and exit.

    Fixes over the previous revision:
    - the filter dict was popped while being iterated, which raises
      RuntimeError ("dictionary changed size during iteration") on Python 3;
      iterate over a snapshot of the keys instead.
    - ClientError has no .message attribute on Python 3; use str(e).
    - traceback.format_exc() takes no exception argument; it formats the
      currently handled exception.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters=dict(default={}, type='dict')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    if region:
        connection = boto3_conn(
            module,
            conn_type='client',
            resource='ec2',
            region=region,
            endpoint=ec2_url,
            **aws_connect_params
        )
    else:
        module.fail_json(msg="region must be specified")

    # Replace filter key underscores with dashes, for compatibility, except
    # if we're dealing with tags ("tag:Name" keys stay untouched). Iterate
    # over a snapshot of the keys since we mutate the dict as we go.
    sanitized_filters = module.params.get("filters")
    for key in list(sanitized_filters):
        if not key.startswith("tag:") and "_" in key:
            sanitized_filters[key.replace("_", "-")] = sanitized_filters.pop(key)

    try:
        security_groups = connection.describe_security_groups(
            Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
        )
    except ClientError as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())

    # Turn the boto3 result in to ansible_friendly_snaked_names
    snaked_security_groups = []
    for security_group in security_groups['SecurityGroups']:
        snaked_security_groups.append(camel_dict_to_snake_dict(security_group))

    # Turn the boto3 result in to ansible friendly tag dictionary
    for security_group in snaked_security_groups:
        if 'tags' in security_group:
            security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group['tags'])

    module.exit_json(security_groups=snaked_security_groups)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_lc_facts.py b/lib/ansible/modules/cloud/amazon/ec2_lc_facts.py
new file mode 100644
index 0000000000..b81ce8975b
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_lc_facts.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_lc_facts
+short_description: Gather facts about AWS Autoscaling Launch Configurations
+description:
+ - Gather facts about AWS Autoscaling Launch Configurations
+version_added: "2.3"
+author: "Loïc Latreille (@psykotox)"
+requirements: [ boto3 ]
+options:
+ name:
+ description:
+ - A name or a list of names to match.
+ required: false
+ default: []
+ sort:
+ description:
+ - Optional attribute which with to sort the results.
+ choices: ['launch_configuration_name', 'image_id', 'created_time', 'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']
+ default: null
+ required: false
+ sort_order:
+ description:
+ - Order in which to sort results.
+ - Only used when the 'sort' parameter is specified.
+ choices: ['ascending', 'descending']
+ default: 'ascending'
+ required: false
+ sort_start:
+ description:
+ - Which result to start with (when sorting).
+ - Corresponds to Python slice notation.
+ default: null
+ required: false
+ sort_end:
+ description:
+ - Which result to end with (when sorting).
+ - Corresponds to Python slice notation.
+ default: null
+ required: false
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all launch configurations
+- ec2_lc_facts:
+
+# Gather facts about launch configuration with name "example"
+- ec2_lc_facts:
+ name: example
+
+# Gather facts sorted by created_time from most recent to least recent
+- ec2_lc_facts:
+ sort: created_time
+ sort_order: descending
+'''
+
+RETURN = '''
+block_device_mapping:
+  description: Block device mapping for the instances of launch configuration
+  type: list
+  sample: "[{
+      'device_name': '/dev/xvda',
+      'ebs': {
+        'delete_on_termination': true,
+        'volume_size': 8,
+        'volume_type': 'gp2'
+      }
+    }]"
+classic_link_vpc_security_groups:
+ description: IDs of one or more security groups for the VPC specified in classic_link_vpc_id
+ type: string
+ sample:
+created_time:
+ description: The creation date and time for the launch configuration
+ type: string
+ sample: "2016-05-27T13:47:44.216000+00:00"
+ebs_optimized:
+  description: EBS I/O optimized (true) or not (false)
+  type: bool
+  sample: true
+image_id:
+ description: ID of the Amazon Machine Image (AMI)
+ type: string
+ sample: "ami-12345678"
+instance_monitoring:
+ description: Launched with detailed monitoring or not
+ type: dict
+ sample: "{
+ 'enabled': true
+ }"
+instance_type:
+ description: Instance type
+ type: string
+ sample: "t2.micro"
+kernel_id:
+ description: ID of the kernel associated with the AMI
+ type: string
+ sample:
+key_name:
+ description: Name of the key pair
+ type: string
+ sample: "user_app"
+launch_configuration_arn:
+ description: Amazon Resource Name (ARN) of the launch configuration
+ type: string
+ sample: "arn:aws:autoscaling:us-east-1:666612345678:launchConfiguration:ba785e3a-dd42-6f02-4585-ea1a2b458b3d:launchConfigurationName/lc-app"
+launch_configuration_name:
+ description: Name of the launch configuration
+ type: string
+ sample: "lc-app"
+ramdisk_id:
+ description: ID of the RAM disk associated with the AMI
+ type: string
+ sample:
+security_groups:
+ description: Security groups associated with the instances
+ type: list
+ sample: "[
+ 'web'
+ ]"
+user_data:
+ description: User data available
+ type: string
+ sample:
+'''
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError, NoCredentialsError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
def list_launch_configs(connection, module):
    """Describe launch configurations, optionally sort/slice, and exit with the facts.

    connection: boto3 'autoscaling' client.
    module: AnsibleModule providing name/sort/sort_order/sort_start/sort_end params.
    """
    launch_config_name = module.params.get("name")
    sort = module.params.get('sort')
    sort_order = module.params.get('sort_order')
    sort_start = module.params.get('sort_start')
    sort_end = module.params.get('sort_end')

    try:
        launch_configs = connection.describe_launch_configurations(LaunchConfigurationNames=launch_config_name)
    except ClientError as e:
        module.fail_json(msg=e.message)

    # Turn the boto3 result in to ansible_friendly_snaked_names
    snaked_launch_configs = [camel_dict_to_snake_dict(lc) for lc in launch_configs['LaunchConfigurations']]

    for launch_config in snaked_launch_configs:
        # camel_dict_to_snake_dict has already renamed the key, so test for
        # 'created_time'; the previous check for 'CreatedTime' could never
        # match, leaving the datetime unconverted in the JSON output.
        if 'created_time' in launch_config:
            launch_config['created_time'] = str(launch_config['created_time'])

    if sort:
        snaked_launch_configs.sort(key=lambda e: e[sort], reverse=(sort_order == 'descending'))

    try:
        if sort and sort_start and sort_end:
            snaked_launch_configs = snaked_launch_configs[int(sort_start):int(sort_end)]
        elif sort and sort_start:
            snaked_launch_configs = snaked_launch_configs[int(sort_start):]
        elif sort and sort_end:
            snaked_launch_configs = snaked_launch_configs[:int(sort_end)]
    # int() of a non-numeric string raises ValueError, not TypeError; catch
    # both so the user gets the friendly message either way.
    except (TypeError, ValueError):
        module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end")

    module.exit_json(launch_configurations=snaked_launch_configs)
+
+
def main():
    """Module entry point for ec2_lc_facts: parse args, connect, list launch configs."""
    spec = ec2_argument_spec()
    spec.update(dict(
        name=dict(required=False, default=[], type='list'),
        sort=dict(required=False, default=None,
                  choices=['launch_configuration_name', 'image_id', 'created_time',
                           'instance_type', 'kernel_id', 'ramdisk_id', 'key_name']),
        sort_order=dict(required=False, default='ascending',
                        choices=['ascending', 'descending']),
        sort_start=dict(required=False),
        sort_end=dict(required=False),
    ))

    module = AnsibleModule(argument_spec=spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    if not region:
        # fail_json exits, so falling through below is impossible.
        module.fail_json(msg="region must be specified")

    connection = boto3_conn(module, conn_type='client', resource='autoscaling',
                            region=region, endpoint=ec2_url, **aws_connect_params)

    list_launch_configs(connection, module)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_lc_find.py b/lib/ansible/modules/cloud/amazon/ec2_lc_find.py
new file mode 100644
index 0000000000..d6c515d6ff
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_lc_find.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+# encoding: utf-8
+
+# (c) 2015, Jose Armesto <jose@armesto.net>
+#
+# This file is part of Ansible
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: ec2_lc_find
+short_description: Find AWS Autoscaling Launch Configurations
+description:
+ - Returns list of matching Launch Configurations for a given name, along with other useful information
+ - Results can be sorted and sliced
+ - It depends on boto
+ - Based on the work by Tom Bamford (https://github.com/tombamford)
+
+version_added: "2.2"
+author: "Jose Armesto (@fiunchinho)"
+options:
+ region:
+ description:
+ - The AWS region to use.
+ required: true
+ aliases: ['aws_region', 'ec2_region']
+ name_regex:
+ description:
+ - A Launch Configuration to match
+ - It'll be compiled as regex
+ required: True
+ sort_order:
+ description:
+ - Order in which to sort results.
+ choices: ['ascending', 'descending']
+ default: 'ascending'
+ required: false
+ limit:
+ description:
+ - How many results to show.
+ - Corresponds to Python slice notation like list[:limit].
+ default: null
+ required: false
+requirements:
+ - "python >= 2.6"
+ - boto3
+"""
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Search for the Launch Configurations that start with "app"
+- ec2_lc_find:
+ name_regex: app.*
+ sort_order: descending
+ limit: 2
+'''
+
+RETURN = '''
+image_id:
+ description: AMI id
+ returned: when Launch Configuration was found
+ type: string
+ sample: "ami-0d75df7e"
+user_data:
+ description: User data used to start instance
+ returned: when Launch Configuration was found
+ type: string
+ user_data: "ZXhwb3J0IENMT1VE"
+name:
+ description: Name of the AMI
+ returned: when Launch Configuration was found
+ type: string
+ sample: "myapp-v123"
+arn:
+ description: Amazon Resource Name (ARN) of the launch configuration
+ returned: when Launch Configuration was found
+ type: string
+ sample: "arn:aws:autoscaling:eu-west-1:12345:launchConfiguration:d82f050e-e315:launchConfigurationName/yourproject"
+instance_type:
+ description: Type of ec2 instance
+ returned: when Launch Configuration was found
+ type: string
+ sample: "t2.small"
+created_time:
+ description: When it was created
+ returned: when Launch Configuration was found
+ type: string
+ sample: "2016-06-29T14:59:22.222000+00:00"
+ebs_optimized:
+ description: Launch Configuration EBS optimized property
+ returned: when Launch Configuration was found
+ type: boolean
+ sample: False
+instance_monitoring:
+ description: Launch Configuration instance monitoring property
+ returned: when Launch Configuration was found
+ type: string
+ sample: {"Enabled": false}
+classic_link_vpc_security_groups:
+ description: Launch Configuration classic link vpc security groups property
+ returned: when Launch Configuration was found
+ type: list
+ sample: []
+block_device_mappings:
+ description: Launch Configuration block device mappings property
+ returned: when Launch Configuration was found
+ type: list
+ sample: []
+keyname:
+ description: Launch Configuration ssh key
+ returned: when Launch Configuration was found
+ type: string
+ sample: mykey
+security_groups:
+ description: Launch Configuration security groups
+ returned: when Launch Configuration was found
+ type: list
+ sample: []
+kernel_id:
+ description: Launch Configuration kernel to use
+ returned: when Launch Configuration was found
+ type: string
+ sample: ''
+ram_disk_id:
+ description: Launch Configuration ram disk property
+ returned: when Launch Configuration was found
+ type: string
+ sample: ''
+associate_public_address:
+ description: Assign public address or not
+ returned: when Launch Configuration was found
+ type: boolean
+ sample: True
+...
+'''
+
+
def find_launch_configs(client, module):
    """Find launch configurations whose name matches name_regex, sorted and limited.

    client: boto3 'autoscaling' client.
    module: AnsibleModule providing name_regex/sort_order/limit params.
    """
    name_regex = module.params.get('name_regex')
    sort_order = module.params.get('sort_order')
    limit = module.params.get('limit')

    # Compile the pattern once; previously it was recompiled inside the filter
    # lambda for every launch configuration on every page.
    pattern = re.compile(name_regex)

    paginator = client.get_paginator('describe_launch_configurations')

    response_iterator = paginator.paginate(
        PaginationConfig={
            'MaxItems': 1000,
            'PageSize': 100
        }
    )

    results = []

    for response in response_iterator:
        for lc in response['LaunchConfigurations']:
            # match() anchors at the start of the name, as before.
            if not pattern.match(lc['LaunchConfigurationName']):
                continue
            results.append({
                'name': lc['LaunchConfigurationName'],
                'arn': lc['LaunchConfigurationARN'],
                'created_time': lc['CreatedTime'],
                'user_data': lc['UserData'],
                'instance_type': lc['InstanceType'],
                'image_id': lc['ImageId'],
                'ebs_optimized': lc['EbsOptimized'],
                'instance_monitoring': lc['InstanceMonitoring'],
                'classic_link_vpc_security_groups': lc['ClassicLinkVPCSecurityGroups'],
                'block_device_mappings': lc['BlockDeviceMappings'],
                'keyname': lc['KeyName'],
                'security_groups': lc['SecurityGroups'],
                'kernel_id': lc['KernelId'],
                'ram_disk_id': lc['RamdiskId'],
                # AssociatePublicIpAddress is absent for non-VPC LCs.
                'associate_public_address': lc.get('AssociatePublicIpAddress', False),
            })

    results.sort(key=lambda e: e['name'], reverse=(sort_order == 'descending'))

    if limit:
        results = results[:int(limit)]

    module.exit_json(changed=False, results=results)
+
+
def main():
    """Module entry point for ec2_lc_find: parse args, connect, run the search."""
    spec = ec2_argument_spec()
    spec.update(dict(
        region=dict(required=True, aliases=['aws_region', 'ec2_region']),
        name_regex=dict(required=True),
        sort_order=dict(required=False, default='ascending',
                        choices=['ascending', 'descending']),
        limit=dict(required=False, type='int'),
    ))

    module = AnsibleModule(argument_spec=spec)

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, True)

    client = boto3_conn(module=module, conn_type='client', resource='autoscaling',
                        region=region, **aws_connect_params)
    find_launch_configs(client, module)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_remote_facts.py b/lib/ansible/modules/cloud/amazon/ec2_remote_facts.py
new file mode 100644
index 0000000000..98ea16628f
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_remote_facts.py
@@ -0,0 +1,192 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_remote_facts
+short_description: Gather facts about ec2 instances in AWS
+description:
+ - Gather facts about ec2 instances in AWS
+version_added: "2.0"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters.
+ required: false
+ default: null
+author:
+ - "Michael Schuett (@michaeljs1990)"
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all ec2 instances
+- ec2_remote_facts:
+
+# Gather facts about all running ec2 instances with a tag of Name:Example
+- ec2_remote_facts:
+ filters:
+ instance-state-name: running
+ "tag:Name": Example
+
+# Gather facts about instance i-123456
+- ec2_remote_facts:
+ filters:
+ instance-id: i-123456
+
+# Gather facts about all instances in vpc-123456 that are t2.small type
+- ec2_remote_facts:
+ filters:
+ vpc-id: vpc-123456
+ instance-type: t2.small
+
+'''
+
+try:
+ import boto.ec2
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
+
+
def get_instance_info(instance):
    """Build a JSON-serializable fact dict from a boto ec2 Instance object.

    instance: boto.ec2 instance object (attributes are only read, never mutated).
    Returns a dict of instance facts.
    """
    # Security groups and interfaces as lists of small dicts. (The original
    # called .copy() on each freshly-built literal, which is a no-op: a dict
    # literal is already a new object on every iteration.)
    groups = [{'id': group.id, 'name': group.name} for group in instance.groups]
    interfaces = [{'id': interface.id, 'mac_address': interface.mac_address}
                  for interface in instance.interfaces]

    # If an instance is terminated, sourceDestCheck is no longer returned
    try:
        source_dest_check = instance.sourceDestCheck
    except AttributeError:
        source_dest_check = None

    # Get block device mapping; the attribute (or fields on its entries) may be
    # missing for terminated instances, in which case we keep what we have.
    bdm_dict = []
    try:
        bdm = instance.block_device_mapping
        for device_name, device in bdm.items():
            bdm_dict.append({
                'device_name': device_name,
                'status': device.status,
                'volume_id': device.volume_id,
                'delete_on_termination': device.delete_on_termination,
                'attach_time': device.attach_time
            })
    except AttributeError:
        pass

    instance_info = {'id': instance.id,
                     'kernel': instance.kernel,
                     'instance_profile': instance.instance_profile,
                     'root_device_type': instance.root_device_type,
                     'private_dns_name': instance.private_dns_name,
                     'public_dns_name': instance.public_dns_name,
                     'ebs_optimized': instance.ebs_optimized,
                     'client_token': instance.client_token,
                     'virtualization_type': instance.virtualization_type,
                     'architecture': instance.architecture,
                     'ramdisk': instance.ramdisk,
                     'tags': instance.tags,
                     'key_name': instance.key_name,
                     'source_destination_check': source_dest_check,
                     'image_id': instance.image_id,
                     'groups': groups,
                     'interfaces': interfaces,
                     'spot_instance_request_id': instance.spot_instance_request_id,
                     'requester_id': instance.requester_id,
                     'monitoring_state': instance.monitoring_state,
                     'placement': {
                         'tenancy': instance._placement.tenancy,
                         'zone': instance._placement.zone
                     },
                     'ami_launch_index': instance.ami_launch_index,
                     'launch_time': instance.launch_time,
                     'hypervisor': instance.hypervisor,
                     'region': instance.region.name,
                     'persistent': instance.persistent,
                     'private_ip_address': instance.private_ip_address,
                     'public_ip_address': instance.ip_address,
                     'state': instance._state.name,
                     'vpc_id': instance.vpc_id,
                     'block_device_mapping': bdm_dict,
                     }

    return instance_info
+
+
def list_ec2_instances(connection, module):
    """Fetch instances matching the module's filters and exit with their facts."""
    filters = module.params.get("filters")

    try:
        matching_instances = connection.get_only_instances(filters=filters)
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    instance_facts = [get_instance_info(instance) for instance in matching_instances]

    module.exit_json(instances=instance_facts)
+
+
def main():
    """Module entry point for ec2_remote_facts: parse args, connect, list instances."""
    spec = ec2_argument_spec()
    spec.update(dict(filters=dict(default=None, type='dict')))

    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        # fail_json exits, so connection is always bound below.
        module.fail_json(msg="region must be specified")

    try:
        connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    list_ec2_instances(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py b/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py
new file mode 100644
index 0000000000..1fd9196098
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_snapshot_facts
+short_description: Gather facts about ec2 volume snapshots in AWS
+description:
+ - Gather facts about ec2 volume snapshots in AWS
+version_added: "2.1"
+author: "Rob White (@wimnat)"
+options:
+ snapshot_ids:
+ description:
+ - If you specify one or more snapshot IDs, only snapshots that have the specified IDs are returned.
+ required: false
+ default: []
+ owner_ids:
+ description:
+ - If you specify one or more snapshot owners, only snapshots from the specified owners and for which you have \
+ access are returned.
+ required: false
+ default: []
+ restorable_by_user_ids:
+ description:
+ - If you specify a list of restorable users, only snapshots with create snapshot permissions for those users are \
+ returned.
+ required: false
+ default: []
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \
+ U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html) for possible filters. Filter \
+ names and values are case sensitive.
+ required: false
+ default: {}
+notes:
+ - By default, the module will return all snapshots, including public ones. To limit results to snapshots owned by \
+ the account use the filter 'owner-id'.
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all snapshots, including public ones
+- ec2_snapshot_facts:
+
+# Gather facts about all snapshots owned by the account 0123456789
+- ec2_snapshot_facts:
+ filters:
+ owner-id: 0123456789
+
+# Or alternatively...
+- ec2_snapshot_facts:
+ owner_ids:
+ - 0123456789
+
+# Gather facts about a particular snapshot using ID
+- ec2_snapshot_facts:
+ filters:
+ snapshot-id: snap-00112233
+
+# Or alternatively...
+- ec2_snapshot_facts:
+ snapshot_ids:
+ - snap-00112233
+
+# Gather facts about any snapshot with a tag key Name and value Example
+- ec2_snapshot_facts:
+ filters:
+ "tag:Name": Example
+
+# Gather facts about any snapshot with an error status
+- ec2_snapshot_facts:
+ filters:
+ status: error
+
+'''
+
+RETURN = '''
+snapshot_id:
+ description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.
+ type: string
+ sample: snap-01234567
+volume_id:
+ description: The ID of the volume that was used to create the snapshot.
+ type: string
+ sample: vol-01234567
+state:
+ description: The snapshot state (completed, pending or error).
+ type: string
+ sample: completed
+state_message:
+ description: Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the error occurred.
+ type: string
+ sample:
+start_time:
+ description: The time stamp when the snapshot was initiated.
+ type: datetime
+ sample: 2015-02-12T02:14:02+00:00
+progress:
+ description: The progress of the snapshot, as a percentage.
+ type: string
+ sample: 100%
+owner_id:
+ description: The AWS account ID of the EBS snapshot owner.
+ type: string
+ sample: 099720109477
+description:
+ description: The description for the snapshot.
+ type: string
+ sample: My important backup
+volume_size:
+ description: The size of the volume, in GiB.
+ type: integer
+ sample: 8
+owner_alias:
+ description: The AWS account alias (for example, amazon, self) or AWS account ID that owns the snapshot.
+ type: string
+ sample: 033440102211
+tags:
+ description: Any tags assigned to the snapshot.
+ type: list
+ sample: "{ 'my_tag_key': 'my_tag_value' }"
+encrypted:
+ description: Indicates whether the snapshot is encrypted.
+ type: boolean
+ sample: True
+kms_key_id:
+ description: The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) that was used to \
+ protect the volume encryption key for the parent volume.
+ type: string
+ sample: 74c9742a-a1b2-45cb-b3fe-abcdef123456
+data_encryption_key_id:
+ description: The data encryption key identifier for the snapshot. This value is a unique identifier that \
+ corresponds to the data encryption key that was used to encrypt the original volume or snapshot copy.
+ type: string
+ sample: "arn:aws:kms:ap-southeast-2:012345678900:key/74c9742a-a1b2-45cb-b3fe-abcdef123456"
+
+'''
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError, NoCredentialsError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
+ boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,
+ ec2_argument_spec, get_aws_connection_info)
+
+
def list_ec2_snapshots(connection, module):
    """Describe EBS snapshots per the module params and exit with snake_cased facts.

    connection: boto3 'ec2' client.
    module: AnsibleModule providing snapshot_ids/owner_ids/restorable_by_user_ids/filters.
    """
    snapshot_ids = module.params.get("snapshot_ids")
    # Coerce owner IDs to strings (YAML parses bare account IDs as ints). Build
    # a real list: on Python 3, map() returns a lazy iterator, which botocore's
    # parameter validation rejects for a list-typed API field.
    owner_ids = [str(owner_id) for owner_id in module.params.get("owner_ids")]
    restorable_by_user_ids = module.params.get("restorable_by_user_ids")
    filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))

    try:
        snapshots = connection.describe_snapshots(SnapshotIds=snapshot_ids, OwnerIds=owner_ids,
                                                  RestorableByUserIds=restorable_by_user_ids, Filters=filters)
    except ClientError as e:
        module.fail_json(msg=e.message)

    # Turn the boto3 result in to ansible_friendly_snaked_names
    snaked_snapshots = [camel_dict_to_snake_dict(snapshot) for snapshot in snapshots['Snapshots']]

    # Turn the boto3 result in to ansible friendly tag dictionary
    for snapshot in snaked_snapshots:
        if 'tags' in snapshot:
            snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags'])

    module.exit_json(snapshots=snaked_snapshots)
+
+
def main():
    """Module entry point for ec2_snapshot_facts: parse args, connect, list snapshots."""
    spec = ec2_argument_spec()
    spec.update(dict(
        snapshot_ids=dict(default=[], type='list'),
        owner_ids=dict(default=[], type='list'),
        restorable_by_user_ids=dict(default=[], type='list'),
        filters=dict(default={}, type='dict')
    ))

    module = AnsibleModule(
        argument_spec=spec,
        mutually_exclusive=[
            ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters']
        ]
    )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
    if not region:
        # fail_json exits, so connection is always bound below.
        module.fail_json(msg="region must be specified")

    connection = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=ec2_url, **aws_connect_params)

    list_ec2_snapshots(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vol_facts.py b/lib/ansible/modules/cloud/amazon/ec2_vol_facts.py
new file mode 100644
index 0000000000..14f5282eca
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_vol_facts.py
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_vol_facts
+short_description: Gather facts about ec2 volumes in AWS
+description:
+ - Gather facts about ec2 volumes in AWS
+version_added: "2.1"
+author: "Rob White (@wimnat)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters.
+ required: false
+ default: null
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all volumes
+- ec2_vol_facts:
+
+# Gather facts about a particular volume using volume ID
+- ec2_vol_facts:
+ filters:
+ volume-id: vol-00112233
+
+# Gather facts about any volume with a tag key Name and value Example
+- ec2_vol_facts:
+ filters:
+ "tag:Name": Example
+
+# Gather facts about any volume that is attached
+- ec2_vol_facts:
+ filters:
+ attachment.status: attached
+
+'''
+
+# TODO: Disabled the RETURN as it was breaking docs building. Someone needs to
+# fix this
+RETURN = '''# '''
+
+try:
+ import boto.ec2
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
+
+
def get_volume_info(volume):
    """Translate a boto ec2 Volume object into a plain fact dict."""
    attachment = volume.attach_data

    # Attachment details are nested under their own key, mirroring the API.
    attachment_set = {
        'attach_time': attachment.attach_time,
        'device': attachment.device,
        'instance_id': attachment.instance_id,
        'status': attachment.status,
    }

    return {
        'create_time': volume.create_time,
        'id': volume.id,
        'iops': volume.iops,
        'size': volume.size,
        'snapshot_id': volume.snapshot_id,
        'status': volume.status,
        'type': volume.type,
        'zone': volume.zone,
        'region': volume.region.name,
        'attachment_set': attachment_set,
        'tags': volume.tags,
    }
+
def list_ec2_volumes(connection, module):
    """Fetch volumes matching the module's filters and exit with their facts."""
    filters = module.params.get("filters")

    try:
        matching_volumes = connection.get_all_volumes(filters=filters)
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    volume_facts = [get_volume_info(volume) for volume in matching_volumes]

    module.exit_json(volumes=volume_facts)
+
+
def main():
    """Module entry point for ec2_vol_facts: parse args, connect, list volumes."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters=dict(default=None, type='dict')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
        # StandardError does not exist on Python 3 (it would raise NameError at
        # handling time); catch the same errors the sibling ec2 facts modules do.
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    list_ec2_volumes(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options.py
new file mode 100644
index 0000000000..4caee64451
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options.py
@@ -0,0 +1,389 @@
+#!/usr/bin/python
+
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: ec2_vpc_dhcp_options
+short_description: Manages DHCP Options, and can ensure the DHCP options for the given VPC match what's
+ requested
+description:
+ - This module removes, or creates DHCP option sets, and can associate them to a VPC.
+ Optionally, a new DHCP Options set can be created that converges a VPC's existing
+ DHCP option set with values provided.
+ When dhcp_options_id is provided, the module will
+ 1. remove (with state='absent')
    2. ensure tags are applied (if state='present' and tags are provided)
    3. attach it to a VPC (if state='present' and a vpc_id is provided).
+ If any of the optional values are missing, they will either be treated
+ as a no-op (i.e., inherit what already exists for the VPC)
+ To remove existing options while inheriting, supply an empty value
+ (e.g. set ntp_servers to [] if you want to remove them from the VPC's options)
+ Most of the options should be self-explanatory.
+author: "Joel Thompson (@joelthompson)"
+version_added: 2.1
+options:
+ domain_name:
+ description:
+ - The domain name to set in the DHCP option sets
+ required: false
+ default: None
+ dns_servers:
+ description:
+ - A list of hosts to set the DNS servers for the VPC to. (Should be a
+ list of IP addresses rather than host names.)
+ required: false
+ default: None
+ ntp_servers:
+ description:
+ - List of hosts to advertise as NTP servers for the VPC.
+ required: false
+ default: None
+ netbios_name_servers:
+ description:
+ - List of hosts to advertise as NetBIOS servers.
+ required: false
+ default: None
+ netbios_node_type:
+ description:
+ - NetBIOS node type to advertise in the DHCP options.
+ The AWS recommendation is to use 2 (when using netbios name services)
+ http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html
+ required: false
+ default: None
+ vpc_id:
+ description:
+ - VPC ID to associate with the requested DHCP option set.
+ If no vpc id is provided, and no matching option set is found then a new
+ DHCP option set is created.
+ required: false
+ default: None
+ delete_old:
+ description:
+ - Whether to delete the old VPC DHCP option set when associating a new one.
+ This is primarily useful for debugging/development purposes when you
+ want to quickly roll back to the old option set. Note that this setting
+ will be ignored, and the old DHCP option set will be preserved, if it
+ is in use by any other VPC. (Otherwise, AWS will return an error.)
+ required: false
+ default: true
+ inherit_existing:
+ description:
+ - For any DHCP options not specified in these parameters, whether to
+ inherit them from the options set already applied to vpc_id, or to
+ reset them to be empty.
+ required: false
+ default: false
+ tags:
+ description:
+ - Tags to be applied to a VPC options set if a new one is created, or
+ if the resource_id is provided. (options must match)
+ required: False
+ default: None
+ aliases: [ 'resource_tags']
+ version_added: "2.1"
+ dhcp_options_id:
+ description:
+ - The resource_id of an existing DHCP options set.
+ If this is specified, then it will override other settings, except tags
+ (which will be updated to match)
+ required: False
+ default: None
+ version_added: "2.1"
+ state:
+ description:
+ - create/assign or remove the DHCP options.
+ If state is set to absent, then a DHCP options set matched either
+ by id, or tags and options will be removed if possible.
+ required: False
+ default: present
+ choices: [ 'absent', 'present' ]
+ version_added: "2.1"
+extends_documentation_fragment: aws
+requirements:
+ - boto
+"""
+
+RETURN = """
+new_options:
+ description: The DHCP options created, associated or found
+ returned: when appropriate
+ type: dict
+ sample:
+ domain-name-servers:
+ - 10.0.0.1
+ - 10.0.1.1
    netbios-name-servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios-node-type: 2
+ domain-name: "my.example.com"
+dhcp_options_id:
  description: The aws resource id of the primary DHCP options set created, found or removed
+ type: string
+ returned: when available
+changed:
+ description: Whether the dhcp options were changed
+ type: bool
+ returned: always
+"""
+
+EXAMPLES = """
+# Completely overrides the VPC DHCP options associated with VPC vpc-123456 and deletes any existing
+# DHCP option set that may have been attached to that VPC.
+- ec2_vpc_dhcp_options:
+ domain_name: "foo.example.com"
+ region: us-east-1
+ dns_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ ntp_servers:
+ - 10.0.0.2
+ - 10.0.1.2
+ netbios_name_servers:
+ - 10.0.0.1
+ - 10.0.1.1
+ netbios_node_type: 2
+ vpc_id: vpc-123456
+ delete_old: True
+ inherit_existing: False
+
+
+# Ensure the DHCP option set for the VPC has 10.0.0.4 and 10.0.1.4 as the specified DNS servers, but
+# keep any other existing settings. Also, keep the old DHCP option set around.
+- ec2_vpc_dhcp_options:
+ region: us-east-1
+ dns_servers:
+ - "{{groups['dns-primary']}}"
+ - "{{groups['dns-secondary']}}"
+ vpc_id: vpc-123456
+ inherit_existing: True
+ delete_old: False
+
+
+## Create a DHCP option set with 4.4.4.4 and 8.8.8.8 as the specified DNS servers, with tags
+## but do not assign to a VPC
+- ec2_vpc_dhcp_options:
+ region: us-east-1
+ dns_servers:
+ - 4.4.4.4
+ - 8.8.8.8
+ tags:
+ Name: google servers
+ Environment: Test
+
+## Delete a DHCP options set that matches the tags and options specified
+- ec2_vpc_dhcp_options:
+ region: us-east-1
+ dns_servers:
+ - 4.4.4.4
+ - 8.8.8.8
+ tags:
+ Name: google servers
+ Environment: Test
+ state: absent
+
+## Associate a DHCP options set with a VPC by ID
+- ec2_vpc_dhcp_options:
+ region: us-east-1
+ dhcp_options_id: dopt-12345678
+ vpc_id: vpc-123456
+
+"""
+
+import boto.vpc
+import boto.ec2
+from boto.exception import EC2ResponseError
+import socket
+import collections
+
def get_resource_tags(vpc_conn, resource_id):
    """Return every tag on the given EC2 resource as a {name: value} dict."""
    matching = vpc_conn.get_all_tags(filters={'resource-id': resource_id})
    return dict((tag.name, tag.value) for tag in matching)
+
def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode):
    """Converge the tags on ``resource_id`` to the requested ``tags`` dict.

    Returns {'changed': bool, 'tags': <tags after the operation>}.  When
    ``add_only`` is True, existing tags missing from ``tags`` are kept
    instead of deleted.

    EC2 errors now propagate to the caller: the previous ``except
    EC2ResponseError`` handler referenced the undefined names ``module``
    and ``get_error_message`` and therefore raised NameError whenever it
    was triggered, masking the real AWS error.
    """
    cur_tags = get_resource_tags(vpc_conn, resource_id)
    if tags == cur_tags:
        return {'changed': False, 'tags': cur_tags}

    to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
    if to_delete and not add_only:
        vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode)

    to_add = dict((k, tags[k]) for k in tags if k not in cur_tags)
    if to_add:
        vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode)

    latest_tags = get_resource_tags(vpc_conn, resource_id)
    return {'changed': True, 'tags': latest_tags}
+
def fetch_dhcp_options_for_vpc(vpc_conn, vpc_id):
    """
    Return the DHCP options object currently associated with vpc_id, or
    None when the VPC cannot be found uniquely or still uses the
    "default" option set.
    """
    vpcs = vpc_conn.get_all_vpcs(vpc_ids=[vpc_id])
    if len(vpcs) != 1:
        return None
    if vpcs[0].dhcp_options_id == "default":
        return None
    matches = vpc_conn.get_all_dhcp_options(dhcp_options_ids=[vpcs[0].dhcp_options_id])
    return matches[0] if len(matches) == 1 else None
+
def match_dhcp_options(vpc_conn, tags=None, options=None):
    """
    Return (True, dhcp_options) for the first DHCP options set whose tags
    and options both match the supplied values; (False, None) otherwise.
    A None/empty ``tags`` or ``options`` argument matches anything.
    """
    for candidate in vpc_conn.get_all_dhcp_options():
        if tags and get_resource_tags(vpc_conn, candidate.id) != tags:
            continue
        if (not options) or candidate.options == options:
            return (True, candidate)
    return (False, None)
+
def remove_dhcp_options_by_id(vpc_conn, dhcp_options_id):
    """Delete the DHCP options set unless a VPC is still associated with it.

    Returns True when the set was deleted, False when it was left in place.
    """
    in_use = vpc_conn.get_all_vpcs(filters={'dhcpOptionsId': dhcp_options_id})
    if in_use:
        return False
    vpc_conn.delete_dhcp_options(dhcp_options_id)
    return True
+
def main():
    """Converge a VPC's DHCP option set with the module parameters.

    Flow: build the desired option dict (optionally inheriting from the
    VPC's current set), reuse an existing matching set if one is found,
    otherwise create a new one; then associate it with the VPC and
    optionally delete the VPC's previous set.  Exits via module.exit_json
    / fail_json.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        dhcp_options_id=dict(type='str', default=None),
        domain_name=dict(type='str', default=None),
        dns_servers=dict(type='list', default=None),
        ntp_servers=dict(type='list', default=None),
        netbios_name_servers=dict(type='list', default=None),
        netbios_node_type=dict(type='int', default=None),
        vpc_id=dict(type='str', default=None),
        delete_old=dict(type='bool', default=True),
        inherit_existing=dict(type='bool', default=False),
        tags=dict(type='dict', default=None, aliases=['resource_tags']),
        state=dict(type='str', default='present', choices=['present', 'absent'])
        )
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
    params = module.params
    found = False
    changed = False
    # defaultdict(lambda: None) lets later code index options that were
    # never set and get None instead of KeyError.
    new_options = collections.defaultdict(lambda: None)


    region, ec2_url, boto_params = get_aws_connection_info(module)
    connection = connect_to_aws(boto.vpc, region, **boto_params)

    existing_options = None

    # First check if we were given a dhcp_options_id
    if not params['dhcp_options_id']:
        # No, so create new_options from the parameters
        if params['dns_servers'] != None:
            new_options['domain-name-servers'] = params['dns_servers']
        if params['netbios_name_servers'] != None:
            new_options['netbios-name-servers'] = params['netbios_name_servers']
        if params['ntp_servers'] != None:
            new_options['ntp-servers'] = params['ntp_servers']
        if params['domain_name'] != None:
            # needs to be a list for comparison with boto objects later
            new_options['domain-name'] = [ params['domain_name'] ]
        if params['netbios_node_type'] != None:
            # needs to be a list for comparison with boto objects later
            new_options['netbios-node-type'] = [ str(params['netbios_node_type']) ]
        # If we were given a vpc_id then we need to look at the options on that
        if params['vpc_id']:
            existing_options = fetch_dhcp_options_for_vpc(connection, params['vpc_id'])
            # if we've been asked to inherit existing options, do that now
            if params['inherit_existing']:
                if existing_options:
                    # Copy over any option the user left unset/empty.
                    for option in [ 'domain-name-servers', 'netbios-name-servers', 'ntp-servers', 'domain-name', 'netbios-node-type']:
                        if existing_options.options.get(option) and new_options[option] != [] and (not new_options[option] or [''] == new_options[option]):
                            new_options[option] = existing_options.options.get(option)

            # Do the vpc's dhcp options already match what we're asked for? if so we are done
            if existing_options and new_options == existing_options.options:
                module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=existing_options.id)

        # If no vpc_id was given, or the options don't match then look for an existing set using tags
        found, dhcp_option = match_dhcp_options(connection, params['tags'], new_options)

    # Now let's cover the case where there are existing options that we were told about by id
    # If a dhcp_options_id was supplied we don't look at options inside, just set tags (if given)
    else:
        supplied_options = connection.get_all_dhcp_options(filters={'dhcp-options-id':params['dhcp_options_id']})
        if len(supplied_options) != 1:
            if params['state'] != 'absent':
                module.fail_json(msg=" a dhcp_options_id was supplied, but does not exist")
        else:
            found = True
            dhcp_option = supplied_options[0]
            if params['state'] != 'absent' and params['tags']:
                ensure_tags(connection, dhcp_option.id, params['tags'], False, module.check_mode)

    # Now we have the dhcp options set, let's do the necessary

    # if we found options we were asked to remove then try to do so
    if params['state'] == 'absent':
        if not module.check_mode:
            if found:
                changed = remove_dhcp_options_by_id(connection, dhcp_option.id)
        module.exit_json(changed=changed, new_options={})

    # otherwise if we haven't found the required options we have something to do
    elif not module.check_mode and not found:

        # create some dhcp options if we weren't able to use existing ones
        if not found:
            # Convert netbios-node-type and domain-name back to strings
            if new_options['netbios-node-type']:
                new_options['netbios-node-type'] = new_options['netbios-node-type'][0]
            if new_options['domain-name']:
                new_options['domain-name'] = new_options['domain-name'][0]

            # create the new dhcp options set requested
            dhcp_option = connection.create_dhcp_options(
                new_options['domain-name'],
                new_options['domain-name-servers'],
                new_options['ntp-servers'],
                new_options['netbios-name-servers'],
                new_options['netbios-node-type'])
            changed = True
            if params['tags']:
                ensure_tags(connection, dhcp_option.id, params['tags'], False, module.check_mode)

    # If we were given a vpc_id, then attach the options we now have to that before we finish
    # NOTE(review): in check mode (or when no matching set was found and
    # none was created) dhcp_option can be None/unbound here, so the
    # .id accesses below would fail — verify the check-mode paths.
    if params['vpc_id'] and not module.check_mode:
        changed = True
        connection.associate_dhcp_options(dhcp_option.id, params['vpc_id'])
        # and remove old ones if that was requested
        if params['delete_old'] and existing_options:
            remove_dhcp_options_by_id(connection, existing_options.id)

    module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=dhcp_option.id)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options_facts.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options_facts.py
new file mode 100644
index 0000000000..063f525ea0
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options_facts.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_dhcp_options_facts
+short_description: Gather facts about dhcp options sets in AWS
+description:
+ - Gather facts about dhcp options sets in AWS
+version_added: "2.2"
+requirements: [ boto3 ]
+author: "Nick Aslanidis (@naslanidis)"
+options:
+ filters:
+ description:
      - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeDhcpOptions.html) for possible filters.
+ required: false
+ default: null
+ DhcpOptionsIds:
+ description:
+ - Get details of specific DHCP Option ID
+ - Provide this value as a list
+ required: false
+ default: None
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# # Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Gather facts about all DHCP Option sets for an account or profile
+ ec2_vpc_dhcp_options_facts:
+ region: ap-southeast-2
+ profile: production
+ register: dhcp_facts
+
+- name: Gather facts about a filtered list of DHCP Option sets
+ ec2_vpc_dhcp_options_facts:
+ region: ap-southeast-2
+ profile: production
+ filters:
+ "tag:Name": "abc-123"
+ register: dhcp_facts
+
+- name: Gather facts about a specific DHCP Option set by DhcpOptionId
+ ec2_vpc_dhcp_options_facts:
+ region: ap-southeast-2
+ profile: production
+ DhcpOptionsIds: dopt-123fece2
+ register: dhcp_facts
+
+'''
+
+RETURN = '''
+dhcp_options:
+ description: The dhcp option sets for the account
+ returned: always
+ type: list
+
+changed:
+ description: True if listing the dhcp options succeeds
+ type: bool
+ returned: always
+'''
+
+import json
+
+try:
+ import botocore
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
def get_dhcp_options_info(dhcp_option):
    """Keep only the DescribeDhcpOptions fields this module reports."""
    return {
        'DhcpOptionsId': dhcp_option['DhcpOptionsId'],
        'DhcpConfigurations': dhcp_option['DhcpConfigurations'],
        'Tags': dhcp_option['Tags'],
    }
+
+
def list_dhcp_options(client, module):
    """Describe DHCP option sets (honouring the ``filters``, ``DryRun`` and
    ``DhcpOptionsIds`` module parameters) and exit the module with the
    snake_cased results.  Removes the previously unused ``dryrun`` local."""
    all_dhcp_options_array = []
    params = dict()

    if module.params.get('filters'):
        params['Filters'] = []
        # Translate the user's {name: value} dict into the
        # [{'Name': ..., 'Values': [...]}] shape the EC2 API expects.
        # .items() behaves identically to the former .iteritems() on
        # Python 2 and also works on Python 3.
        for key, value in module.params.get('filters').items():
            temp_dict = dict()
            temp_dict['Name'] = key
            # NOTE(review): basestring is Python-2-only; confirm whether
            # this module must also run under Python 3.
            if isinstance(value, basestring):
                temp_dict['Values'] = [value]
            else:
                temp_dict['Values'] = value
            params['Filters'].append(temp_dict)

    if module.params.get("DryRun"):
        params['DryRun'] = module.params.get("DryRun")

    if module.params.get("DhcpOptionsIds"):
        params['DhcpOptionsIds'] = module.params.get("DhcpOptionsIds")

    try:
        all_dhcp_options = client.describe_dhcp_options(**params)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))

    for dhcp_option in all_dhcp_options['DhcpOptions']:
        all_dhcp_options_array.append(get_dhcp_options_info(dhcp_option))

    snaked_dhcp_options_array = []
    for dhcp_option in all_dhcp_options_array:
        snaked_dhcp_options_array.append(camel_dict_to_snake_dict(dhcp_option))

    module.exit_json(dhcp_options=snaked_dhcp_options_array)
+
+
def main():
    """Module entry point: build the AnsibleModule, connect to EC2 with
    boto3 and hand off to list_dhcp_options."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters=dict(type='dict', default=None),
            DryRun=dict(type='bool', default=False),
            DhcpOptionsIds=dict(type='list', default=None)
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='json and botocore/boto3 is required.')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        connection = boto3_conn(module, conn_type='client', resource='ec2',
                                region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Can't authorize connection - " + str(e))

    # list_dhcp_options exits the module itself; the exit_json below is
    # only reached if it ever returns.
    results = list_dhcp_options(connection, module)

    module.exit_json(result=results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_igw.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_igw.py
new file mode 100644
index 0000000000..91366f35ac
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_igw.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_igw
+short_description: Manage an AWS VPC Internet gateway
+description:
+ - Manage an AWS VPC Internet gateway
+version_added: "2.0"
+author: Robert Estelle (@erydo)
+options:
+ vpc_id:
+ description:
+ - The VPC ID for the VPC in which to manage the Internet Gateway.
+ required: true
+ default: null
+ state:
+ description:
+ - Create or terminate the IGW
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Ensure that the VPC has an Internet Gateway.
# The Internet Gateway ID can be accessed via {{igw.gateway_id}} for use in setting up NATs etc.
+ec2_vpc_igw:
+ vpc_id: vpc-abcdefgh
+ state: present
+register: igw
+
+'''
+
+try:
+ import boto.ec2
+ import boto.vpc
+ from boto.exception import EC2ResponseError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+ if __name__ != '__main__':
+ raise
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
+
+
class AnsibleIGWException(Exception):
    """Raised for IGW-specific failures; main() converts it to fail_json."""
    pass
+
+
def ensure_igw_absent(vpc_conn, vpc_id, check_mode):
    """Detach and delete every Internet Gateway attached to vpc_id.

    Returns {'changed': bool}; in check mode nothing is modified.
    Raises AnsibleIGWException on EC2 errors.
    """
    attached = vpc_conn.get_all_internet_gateways(
        filters={'attachment.vpc-id': vpc_id})
    if not attached:
        return {'changed': False}
    if check_mode:
        return {'changed': True}

    for gateway in attached:
        try:
            vpc_conn.detach_internet_gateway(gateway.id, vpc_id)
            vpc_conn.delete_internet_gateway(gateway.id)
        except EC2ResponseError as e:
            raise AnsibleIGWException(
                'Unable to delete Internet Gateway, error: {0}'.format(e))

    return {'changed': True}
+
+
def ensure_igw_present(vpc_conn, vpc_id, check_mode):
    """Create and attach an Internet Gateway for vpc_id if none exists.

    Returns {'changed': bool, 'gateway_id': id-or-None}.  Raises
    AnsibleIGWException when EC2 reports multiple gateways or an error.
    """
    attached = vpc_conn.get_all_internet_gateways(
        filters={'attachment.vpc-id': vpc_id})

    if len(attached) > 1:
        raise AnsibleIGWException(
            'EC2 returned more than one Internet Gateway for VPC {0}, aborting'
            .format(vpc_id))

    if attached:
        return {'changed': False, 'gateway_id': attached[0].id}

    if check_mode:
        return {'changed': True, 'gateway_id': None}

    try:
        gateway = vpc_conn.create_internet_gateway()
        vpc_conn.attach_internet_gateway(gateway.id, vpc_id)
        return {'changed': True, 'gateway_id': gateway.id}
    except EC2ResponseError as e:
        raise AnsibleIGWException(
            'Unable to create Internet Gateway, error: {0}'.format(e))
+
+
def main():
    """Module entry point: dispatch to ensure_igw_present/ensure_igw_absent."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            vpc_id=dict(required=True),
            state=dict(default='present', choices=['present', 'absent'])
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="region must be specified")

    try:
        connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    vpc_id = module.params.get('vpc_id')
    state = module.params.get('state', 'present')

    try:
        if state == 'present':
            result = ensure_igw_present(connection, vpc_id, check_mode=module.check_mode)
        elif state == 'absent':
            result = ensure_igw_absent(connection, vpc_id, check_mode=module.check_mode)
    except AnsibleIGWException as e:
        module.fail_json(msg=str(e))

    module.exit_json(**result)


if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py
new file mode 100644
index 0000000000..1758e288c6
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_nacl.py
@@ -0,0 +1,548 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: ec2_vpc_nacl
+short_description: create and delete Network ACLs.
+description:
+ - Read the AWS documentation for Network ACLS
+ U(http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
+version_added: "2.2"
+options:
+ name:
+ description:
+ - Tagged name identifying a network ACL.
+ required: true
+ vpc_id:
+ description:
+ - VPC id of the requesting VPC.
+ required: true
+ subnets:
+ description:
+ - The list of subnets that should be associated with the network ACL.
+ - Must be specified as a list
+ - Each subnet can be specified as subnet ID, or its tagged name.
+ required: false
+ egress:
+ description:
+ - A list of rules for outgoing traffic.
+ - Each rule must be specified as a list.
+ required: false
+ ingress:
+ description:
+ - List of rules for incoming traffic.
+ - Each rule must be specified as a list.
+ required: false
+ tags:
+ description:
+ - Dictionary of tags to look for and apply when creating a network ACL.
+ required: false
+ state:
+ description:
+ - Creates or modifies an existing NACL
+ - Deletes a NACL and reassociates subnets to the default NACL
+ required: false
+ choices: ['present', 'absent']
+ default: present
+author: Mike Mochan(@mmochan)
+extends_documentation_fragment: aws
+requirements: [ botocore, boto3, json ]
+'''
+
+EXAMPLES = '''
+
+# Complete example to create and delete a network ACL
+# that allows SSH, HTTP and ICMP in, and all traffic out.
+- name: "Create and associate production DMZ network ACL with DMZ subnets"
+ ec2_vpc_nacl:
+ vpc_id: vpc-12345678
+ name: prod-dmz-nacl
+ region: ap-southeast-2
+ subnets: ['prod-dmz-1', 'prod-dmz-2']
+ tags:
+ CostCode: CC1234
+ Project: phoenix
+ Description: production DMZ
+ ingress: [
+ # rule no, protocol, allow/deny, cidr, icmp_code, icmp_type,
+ # port from, port to
+ [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22],
+ [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80],
+ [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8],
+ ]
+ egress: [
+ [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
+ ]
+ state: 'present'
+
+- name: "Remove the ingress and egress rules - defaults to deny all"
+ ec2_vpc_nacl:
+ vpc_id: vpc-12345678
+ name: prod-dmz-nacl
+ region: ap-southeast-2
+ subnets:
+ - prod-dmz-1
+ - prod-dmz-2
+ tags:
+ CostCode: CC1234
+ Project: phoenix
+ Description: production DMZ
+ state: present
+
+- name: "Remove the NACL subnet associations and tags"
+ ec2_vpc_nacl:
+ vpc_id: 'vpc-12345678'
+ name: prod-dmz-nacl
+ region: ap-southeast-2
+ state: present
+
+- name: "Delete nacl and subnet associations"
+ ec2_vpc_nacl:
+ vpc_id: vpc-12345678
+ name: prod-dmz-nacl
+ state: absent
+'''
+RETURN = '''
+task:
+ description: The result of the create, or delete action.
+ returned: success
+ type: dictionary
+'''
+
+try:
+ import botocore
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
+
+
# Common fields for the default rule that is contained within every VPC NACL.
DEFAULT_RULE_FIELDS = {
    'RuleNumber': 32767,
    'RuleAction': 'deny',
    'CidrBlock': '0.0.0.0/0',
    'Protocol': '-1'
}

# dict(base, **extras) works on both Python 2 and 3; the previous
# ``DEFAULT_RULE_FIELDS.items() + [...]`` form raises TypeError on
# Python 3 (dict_items does not support +).
DEFAULT_INGRESS = dict(DEFAULT_RULE_FIELDS, Egress=False)
DEFAULT_EGRESS = dict(DEFAULT_RULE_FIELDS, Egress=True)

# VPC-supported IANA protocol numbers
# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, }
+
+
+#Utility methods
def icmp_present(entry):
    """Return True when the rule entry is the 6-field ICMP form.

    ICMP rules are [num, proto, action, cidr, icmp_code, icmp_type];
    longer entries are port-range rules.  The previous expression
    ``len(entry) == 6 and entry[1] == 'icmp' or entry[1] == 1`` bound
    ``and`` tighter than ``or``, so any rule whose protocol field was
    the number 1 was classified as ICMP regardless of its length.
    """
    return len(entry) == 6 and (entry[1] == 'icmp' or entry[1] == 1)
+
+
def load_tags(module):
    """Build the boto3-style Tags list from the module's ``tags`` parameter,
    always appending a Name tag taken from the ``name`` parameter.

    Uses ``.items()`` (identical on Python 2, also valid on Python 3)
    instead of the py2-only ``.iteritems()``, and folds the duplicated
    Name-append into a single statement.
    """
    tags = []
    if module.params.get('tags'):
        for name, value in module.params.get('tags').items():
            tags.append({'Key': name, 'Value': str(value)})
    tags.append({'Key': "Name", 'Value': module.params.get('name')})
    return tags
+
+
def subnets_removed(nacl_id, subnets, client, module):
    """Return currently-associated subnet ids absent from ``subnets``."""
    acl = find_acl_by_id(nacl_id, client, module)
    associated = [assoc['SubnetId'] for assoc in acl['NetworkAcls'][0]['Associations']]
    return [sid for sid in associated if sid not in subnets]
+
+
def subnets_added(nacl_id, subnets, client, module):
    """Return requested subnet ids not yet associated with the NACL."""
    acl = find_acl_by_id(nacl_id, client, module)
    associated = [assoc['SubnetId'] for assoc in acl['NetworkAcls'][0]['Associations']]
    return [sid for sid in subnets if sid not in associated]
+
+
def subnets_changed(nacl, client, module):
    """Converge the NACL's subnet associations with the ``subnets`` parameter.

    Returns True when any association was added, or handed back to the
    VPC's default NACL; False when nothing needed to change.
    """
    changed = False
    vpc_id = module.params.get('vpc_id')
    nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
    subnets = subnets_to_associate(nacl, client, module)
    if not subnets:
        # No subnets requested: re-point every current association at the
        # VPC's default NACL (a subnet always has some NACL association).
        default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
        subnets = find_subnet_ids_by_nacl_id(nacl_id, client, module)
        if subnets:
            replace_network_acl_association(default_nacl_id, subnets, client, module)
            changed = True
            return changed
        changed = False
        return changed
    subs_added = subnets_added(nacl_id, subnets, client, module)
    if subs_added:
        replace_network_acl_association(nacl_id, subs_added, client, module)
        changed = True
    subs_removed = subnets_removed(nacl_id, subnets, client, module)
    if subs_removed:
        # Subnets dropped from the request go back to the default NACL.
        default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
        replace_network_acl_association(default_nacl_id, subs_removed, client, module)
        changed = True
    return changed
+
+
def nacls_changed(nacl, client, module):
    """Converge the NACL's ingress/egress entries with the module
    parameters; return True when any rule was added or removed."""
    changed = False
    nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']

    latest = describe_network_acl(client, module)
    entries = latest['NetworkAcls'][0]['Entries']
    # Exclude the immutable catch-all deny rules AWS adds to every NACL.
    egress = [entry for entry in entries
              if entry['Egress'] is True and entry != DEFAULT_EGRESS]
    ingress = [entry for entry in entries
               if entry['Egress'] is False and entry != DEFAULT_INGRESS]

    if rules_changed(egress, module.params.get('egress'), True, nacl_id, client, module):
        changed = True
    if rules_changed(ingress, module.params.get('ingress'), False, nacl_id, client, module):
        changed = True
    return changed
+
+
def tags_changed(nacl_id, client, module):
    """Compare the NACL's tags (flattened to [key, value, ...] lists) with
    the requested tags plus the Name tag; rewrite them and return True on
    mismatch, return False when they match or the NACL is missing.

    Uses ``.items()`` instead of the Python-2-only ``.iteritems()``;
    behavior is otherwise unchanged.
    """
    changed = False
    tags = dict()
    if module.params.get('tags'):
        tags = module.params.get('tags')
        tags['Name'] = module.params.get('name')
    nacl = find_acl_by_id(nacl_id, client, module)
    if nacl['NetworkAcls']:
        # Flatten both sides to sorted [k, v, k, v, ...] lists so the
        # comparison ignores tag ordering.
        nacl_values = [t.values() for t in nacl['NetworkAcls'][0]['Tags']]
        nacl_tags = [item for sublist in nacl_values for item in sublist]
        tag_values = [[key, str(value)] for key, value in tags.items()]
        tags = [item for sublist in tag_values for item in sublist]
        if sorted(nacl_tags) == sorted(tags):
            changed = False
            return changed
        else:
            delete_tags(nacl_id, client, module)
            create_tags(nacl_id, client, module)
            changed = True
            return changed
    return changed
+
+
def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module):
    """Diff the requested rules for one direction against the rules AWS
    currently holds and apply the delta; return True when anything was
    deleted or created."""
    desired = [process_rule_entry(entry, Egress) for entry in param_rules]
    if desired == aws_rules:
        return False

    changed = False
    for stale in [rule for rule in aws_rules if rule not in desired]:
        delete_network_acl_entry({'NetworkAclId': nacl_id,
                                  'RuleNumber': stale['RuleNumber'],
                                  'Egress': Egress}, client, module)
        changed = True
    for fresh in [rule for rule in desired if rule not in aws_rules]:
        fresh['NetworkAclId'] = nacl_id
        create_network_acl_entry(fresh, client, module)
        changed = True
    return changed
+
+
def process_rule_entry(entry, Egress):
    """Translate one rule tuple from the module params into boto3
    create_network_acl_entry keyword arguments.

    The tuple layout is [num, protocol, action, cidr, icmp_type,
    icmp_code, port_from, port_to].
    """
    params = {
        'RuleNumber': entry[0],
        'Protocol': str(PROTOCOL_NUMBERS[entry[1]]),
        'RuleAction': entry[2],
        'Egress': Egress,
        'CidrBlock': entry[3],
    }
    if icmp_present(entry):
        params['IcmpTypeCode'] = {"Type": int(entry[4]), "Code": int(entry[5])}
    elif entry[6] or entry[7]:
        # Non-ICMP rules may carry an optional port range.
        params['PortRange'] = {"From": entry[6], 'To': entry[7]}
    return params
+
+
def restore_default_associations(assoc_ids, default_nacl_id, client, module):
    """Point the given subnet associations back at the VPC's default NACL.

    Returns True when associations were restored, None when there were
    none to restore.
    """
    if not assoc_ids:
        return None
    request = {'NetworkAclId': default_nacl_id[0]}
    for assoc_id in assoc_ids:
        request['AssociationId'] = assoc_id
        restore_default_acl_association(request, client, module)
    return True
+
+
def construct_acl_entries(nacl, client, module):
    """Create every ingress and egress entry from the module params on a
    freshly created NACL (ingress first, then egress)."""
    nacl_id = nacl['NetworkAcl']['NetworkAclId']
    for is_egress, param_name in ((False, 'ingress'), (True, 'egress')):
        for rule in module.params.get(param_name):
            entry = process_rule_entry(rule, Egress=is_egress)
            entry['NetworkAclId'] = nacl_id
            create_network_acl_entry(entry, client, module)
+
+
+## Module invocations
def setup_network_acl(client, module):
    """Create the NACL if it does not exist, otherwise reconcile it.

    Args:
        client (botocore.client.EC2): Boto3 client.
        module (AnsibleModule): Ansible module object.

    Returns:
        Tuple (bool, str): changed flag and the NACL id.
    """
    nacl = describe_network_acl(client, module)
    if not nacl['NetworkAcls']:
        # No NACL with this Name tag yet: create and configure it from scratch.
        nacl = create_network_acl(module.params.get('vpc_id'), client, module)
        nacl_id = nacl['NetworkAcl']['NetworkAclId']
        create_tags(nacl_id, client, module)
        subnets = subnets_to_associate(nacl, client, module)
        replace_network_acl_association(nacl_id, subnets, client, module)
        construct_acl_entries(nacl, client, module)
        return True, nacl_id
    # NACL already exists: reconcile subnets, rules and tags.  The helpers
    # return plain booleans, so the previous `is True` chain and the
    # duplicated return statement were redundant.
    nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
    subnet_result = subnets_changed(nacl, client, module)
    nacl_result = nacls_changed(nacl, client, module)
    tag_result = tags_changed(nacl_id, client, module)
    changed = subnet_result or nacl_result or tag_result
    return changed, nacl_id
+
+
def remove_network_acl(client, module):
    """Delete the managed NACL, re-pointing its subnets at the default NACL.

    Args:
        client (botocore.client.EC2): Boto3 client.
        module (AnsibleModule): Ansible module object.

    Returns:
        Tuple (bool, dict): changed flag and a message keyed by the NACL
        id (or by the VPC id when the default NACL cannot be found).
    """
    changed = False
    result = dict()
    vpc_id = module.params.get('vpc_id')
    nacl = describe_network_acl(client, module)
    if nacl['NetworkAcls']:
        nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
        associations = nacl['NetworkAcls'][0]['Associations']
        assoc_ids = [a['NetworkAclAssociationId'] for a in associations]
        default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)
        if not default_nacl_id:
            result = {vpc_id: "Default NACL ID not found - Check the VPC ID"}
            return changed, result
        # Subnets cannot be left without a NACL: hand them back to the
        # VPC default before deleting this one.
        if restore_default_associations(assoc_ids, default_nacl_id, client, module):
            delete_network_acl(nacl_id, client, module)
            changed = True
            result[nacl_id] = "Successfully deleted"
            return changed, result
        if not assoc_ids:
            # Nothing was associated; the NACL can be deleted directly.
            delete_network_acl(nacl_id, client, module)
            changed = True
            result[nacl_id] = "Successfully deleted"
            return changed, result
    return changed, result
+
+
+#Boto3 client methods
def create_network_acl(vpc_id, client, module):
    """Create a new network ACL in the given VPC.

    Fails the module on AWS errors; otherwise returns the boto3
    CreateNetworkAcl response (contains the 'NetworkAcl' key).
    """
    try:
        response = client.create_network_acl(VpcId=vpc_id)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
    return response
+
+
def create_network_acl_entry(params, client, module):
    """Create a single NACL entry from pre-built boto3 kwargs.

    Fails the module on AWS errors; otherwise returns the boto3 response.
    """
    try:
        response = client.create_network_acl_entry(**params)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
    return response
+
+
def create_tags(nacl_id, client, module):
    """Replace the NACL's tags with those built from the module params.

    Existing tags are deleted first so tags removed from the play do not
    linger on the resource. Fails the module on AWS errors.
    """
    try:
        delete_tags(nacl_id, client, module)
        client.create_tags(Resources=[nacl_id], Tags=load_tags(module))
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
+
+
def delete_network_acl(nacl_id, client, module):
    """Delete the given network ACL, failing the module on AWS errors."""
    try:
        client.delete_network_acl(NetworkAclId=nacl_id)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
+
+
def delete_network_acl_entry(params, client, module):
    """Delete one NACL entry; params must hold NetworkAclId, RuleNumber
    and Egress. Fails the module on AWS errors."""
    try:
        client.delete_network_acl_entry(**params)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
+
+
def delete_tags(nacl_id, client, module):
    """Delete all tags from the given NACL, failing the module on AWS errors."""
    try:
        client.delete_tags(Resources=[nacl_id])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
+
+
def describe_acl_associations(subnets, client, module):
    """Return the association ids linking the given subnets to their NACLs.

    Returns an empty list when no subnets are supplied; fails the module
    on AWS errors.
    """
    if not subnets:
        return []
    subnet_filter = [{'Name': 'association.subnet-id', 'Values': subnets}]
    try:
        results = client.describe_network_acls(Filters=subnet_filter)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
    associations = results['NetworkAcls'][0]['Associations']
    return [assoc['NetworkAclAssociationId'] for assoc in associations
            if assoc['SubnetId'] in subnets]
+
+
def describe_network_acl(client, module):
    """Look up the NACL whose Name tag matches the module's 'name' param.

    Returns the raw describe_network_acls response; its 'NetworkAcls'
    list is empty when no matching ACL exists.
    """
    name_filter = [{'Name': 'tag:Name', 'Values': [module.params.get('name')]}]
    try:
        response = client.describe_network_acls(Filters=name_filter)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
    return response
+
+
def find_acl_by_id(nacl_id, client, module):
    """Return the describe_network_acls response for a single NACL id,
    failing the module on AWS errors."""
    try:
        return client.describe_network_acls(NetworkAclIds=[nacl_id])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
+
+
def find_default_vpc_nacl(vpc_id, client, module):
    """Return the id(s) of the default network ACL(s) in the given VPC.

    Args:
        vpc_id (str): The VPC to search.
        client (botocore.client.EC2): Boto3 client.
        module (AnsibleModule): Ansible module object.

    Returns:
        list: NetworkAclIds whose IsDefault flag is set (normally one).
    """
    try:
        response = client.describe_network_acls(Filters=[
            {'Name': 'vpc-id', 'Values': [vpc_id]}])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
    nacls = response['NetworkAcls']
    # Truthiness instead of '== True' (PEP 8 E712); IsDefault is a bool.
    return [n['NetworkAclId'] for n in nacls if n['IsDefault']]
+
+
def find_subnet_ids_by_nacl_id(nacl_id, client, module):
    """Return the subnet ids currently associated with the given NACL,
    or an empty list when the NACL has no associations."""
    try:
        results = client.describe_network_acls(Filters=[
            {'Name': 'association.network-acl-id', 'Values': [nacl_id]}
        ])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
    if not results['NetworkAcls']:
        return []
    associations = results['NetworkAcls'][0]['Associations']
    return [assoc['SubnetId'] for assoc in associations if assoc['SubnetId']]
+
+
def replace_network_acl_association(nacl_id, subnets, client, module):
    """Associate the given subnets with this NACL.

    Looks up each subnet's current association id and replaces it so the
    subnet points at nacl_id. Fails the module on AWS errors.
    """
    params = dict()
    params['NetworkAclId'] = nacl_id
    for association in describe_acl_associations(subnets, client, module):
        params['AssociationId'] = association
        try:
            client.replace_network_acl_association(**params)
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg=str(e))
+
+
def replace_network_acl_entry(entries, Egress, nacl_id, client, module):
    """Replace existing NACL entries with the given boto3 entry dicts.

    NOTE(review): the Egress parameter is not referenced in this body --
    each entry dict is expected to carry its own 'Egress' key.
    """
    params = dict()
    for entry in entries:
        # NB: re-binds params to the entry dict itself (adding
        # NetworkAclId mutates the caller's entry) rather than copying.
        params = entry
        params['NetworkAclId'] = nacl_id
        try:
            client.replace_network_acl_entry(**params)
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg=str(e))
+
+
def restore_default_acl_association(params, client, module):
    """Re-point one subnet association at the default NACL.

    params must contain 'NetworkAclId' (the default NACL id) and
    'AssociationId'. Fails the module on AWS errors.
    """
    try:
        client.replace_network_acl_association(**params)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
+
+
def subnets_to_associate(nacl, client, module):
    """Resolve the module's 'subnets' parameter to a list of subnet ids.

    Entries may be given either as subnet ids (subnet-xxxx) or as Name
    tags; the lookup filter is chosen from the first entry, so the two
    forms cannot be mixed.
    """
    requested = list(module.params.get('subnets'))
    if not requested:
        return []
    if requested[0].startswith("subnet-"):
        lookup = {'Name': 'subnet-id', 'Values': requested}
    else:
        lookup = {'Name': 'tag:Name', 'Values': requested}
    try:
        subnets = client.describe_subnets(Filters=[lookup])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
    return [s['SubnetId'] for s in subnets['Subnets'] if s['SubnetId']]
+
+
def main():
    """Entry point: build the module spec, connect to EC2 and dispatch to
    setup_network_acl (state=present) or remove_network_acl (state=absent),
    then exit with the changed flag and NACL id."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        vpc_id=dict(required=True),
        name=dict(required=True),
        subnets=dict(required=False, type='list', default=list()),
        tags=dict(required=False, type='dict'),
        ingress=dict(required=False, type='list', default=list()),
        egress=dict(required=False, type='list', default=list(),),
        state=dict(default='present', choices=['present', 'absent']),
    ),
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='json, botocore and boto3 are required.')
    state = module.params.get('state').lower()
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Can't authorize connection - %s" % str(e))

    # Map the requested state to its handler function.
    invocations = {
        "present": setup_network_acl,
        "absent": remove_network_acl
    }
    (changed, results) = invocations[state](client, module)
    module.exit_json(changed=changed, nacl_id=results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_nacl_facts.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_nacl_facts.py
new file mode 100644
index 0000000000..e7f6a5b238
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_nacl_facts.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_nacl_facts
+short_description: Gather facts about Network ACLs in an AWS VPC
+description:
+ - Gather facts about Network ACLs in an AWS VPC
+version_added: "2.2"
+author: "Brad Davidson (@brandond)"
+requires: [ boto3 ]
+options:
+ nacl_ids:
+ description:
+ - A list of Network ACL IDs to retrieve facts about.
+ required: false
+ default: []
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See \
+ U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkAcls.html) for possible filters. Filter \
+ names and values are case sensitive.
+ required: false
+ default: {}
+notes:
+ - By default, the module will return all Network ACLs.
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all Network ACLs:
+- name: Get All NACLs
+ register: all_nacls
+ ec2_vpc_nacl_facts:
+ region: us-west-2
+
+# Retrieve default Network ACLs:
+- name: Get Default NACLs
+ register: default_nacls
+ ec2_vpc_nacl_facts:
+ region: us-west-2
+ filters:
+ 'default': 'true'
+'''
+
+RETURN = '''
+nacl:
+ description: Returns an array of complex objects as described below.
+ returned: success
+ type: list of complex
+ contains:
+ nacl_id:
+ description: The ID of the Network Access Control List.
+ returned: always
+ type: string
+ vpc_id:
+ description: The ID of the VPC that the NACL is attached to.
+ returned: always
+ type: string
+ is_default:
+ description: True if the NACL is the default for its VPC.
+ returned: always
+ type: boolean
+ tags:
+ description: A dict of tags associated with the NACL.
+ returned: always
+ type: dict
+ subnets:
+ description: A list of subnet IDs that are associated with the NACL.
+ returned: always
+ type: list of string
+ ingress:
+ description: A list of NACL ingress rules.
+ returned: always
+ type: list of list
+ egress:
+ description: A list of NACL egress rules.
+ returned: always
+ type: list of list
+'''
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError, NoCredentialsError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+# VPC-supported IANA protocol numbers
+# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
+PROTOCOL_NAMES = {'-1': 'all', '1': 'icmp', '6': 'tcp', '17': 'udp'}
+
def list_ec2_vpc_nacls(connection, module):
    """Describe NACLs and exit the module with ansible-friendly facts.

    Builds the describe_network_acls call from the nacl_ids/filters
    module params, snake_cases the result, converts the tag list into a
    dict, and flattens entries into separate ingress/egress rule lists.
    Exits the module directly via exit_json/fail_json.
    """
    nacl_ids = module.params.get("nacl_ids")
    filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))

    try:
        nacls = connection.describe_network_acls(NetworkAclIds=nacl_ids, Filters=filters)
    except (ClientError, NoCredentialsError) as e:
        module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))

    # Turn the boto3 result in to ansible_friendly_snaked_names
    snaked_nacls = []
    for nacl in nacls['NetworkAcls']:
        snaked_nacls.append(camel_dict_to_snake_dict(nacl))

    # Turn the boto3 result in to ansible friendly tag dictionary
    for nacl in snaked_nacls:
        if 'tags' in nacl:
            nacl['tags'] = boto3_tag_list_to_ansible_dict(nacl['tags'])
        if 'entries' in nacl:
            # Rule 32767 is AWS's default catch-all entry; exclude it.
            nacl['egress'] = [nacl_entry_to_list(e) for e in nacl['entries']
                              if e['rule_number'] != 32767 and e['egress']]
            nacl['ingress'] = [nacl_entry_to_list(e) for e in nacl['entries']
                               if e['rule_number'] != 32767 and not e['egress']]
            del nacl['entries']
        if 'associations' in nacl:
            nacl['subnets'] = [a['subnet_id'] for a in nacl['associations']]
            del nacl['associations']
        if 'network_acl_id' in nacl:
            # Rename to the documented 'nacl_id' return key.
            nacl['nacl_id'] = nacl['network_acl_id']
            del nacl['network_acl_id']

    module.exit_json(nacls=snaked_nacls)
+
def nacl_entry_to_list(entry):
    """Flatten a snake_cased NACL entry dict into the list layout used by
    the ec2_vpc_nacl module: [num, protocol, action, cidr, ...]."""
    elist = [
        entry['rule_number'],
        PROTOCOL_NAMES[entry['protocol']],
        entry['rule_action'],
        entry['cidr_block'],
    ]
    # ICMP entries get two extra slots (type, code); other protocols
    # reserve four (icmp type, icmp code, port from, port to).
    if entry['protocol'] == '1':
        elist.extend([-1, -1])
    else:
        elist.extend([None, None, None, None])

    if 'icmp_type_code' in entry:
        elist[4] = entry['icmp_type_code']['type']
        elist[5] = entry['icmp_type_code']['code']

    if 'port_range' in entry:
        elist[6] = entry['port_range']['from']
        elist[7] = entry['port_range']['to']

    return elist
+
def main():
    """Entry point: build the module spec, connect to EC2 and list NACLs.

    nacl_ids and filters are mutually exclusive; a region is mandatory.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            nacl_ids=dict(default=[], type='list'),
            filters=dict(default={}, type='dict')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[
                               ['nacl_ids', 'filters']
                           ]
                           )

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    if region:
        connection = boto3_conn(module, conn_type='client', resource='ec2',
                                region=region, endpoint=ec2_url, **aws_connect_params)
    else:
        module.fail_json(msg="region must be specified")

    list_ec2_vpc_nacls(connection, module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway.py
new file mode 100644
index 0000000000..f3f95c107e
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway.py
@@ -0,0 +1,1089 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_nat_gateway
+short_description: Manage AWS VPC NAT Gateways.
+description:
+ - Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids.
+version_added: "2.2"
+requirements: [boto3, botocore]
+options:
+ state:
+ description:
+ - Ensure NAT Gateway is present or absent.
+ required: false
+ default: "present"
+ choices: ["present", "absent"]
+ nat_gateway_id:
+ description:
+ - The id AWS dynamically allocates to the NAT Gateway on creation.
+ This is required when the absent option is present.
+ required: false
+ default: None
+ subnet_id:
+ description:
+ - The id of the subnet to create the NAT Gateway in. This is required
+ with the present option.
+ required: false
+ default: None
+ allocation_id:
+ description:
+ - The id of the elastic IP allocation. If this is not passed and the
+ eip_address is not passed. An EIP is generated for this NAT Gateway.
+ required: false
+ default: None
+ eip_address:
+ description:
+ - The elastic IP address of the EIP you want attached to this NAT Gateway.
+ If this is not passed and the allocation_id is not passed,
+ an EIP is generated for this NAT Gateway.
+ required: false
+ if_exist_do_not_create:
+ description:
+ - if a NAT Gateway exists already in the subnet_id, then do not create a new one.
+ required: false
+ default: false
+ release_eip:
+ description:
+ - Deallocate the EIP from the VPC.
+ - Option is only valid with the absent state.
    - You should use this with the wait option, since you cannot release an address while a delete operation is happening.
+ required: false
+ default: true
+ wait:
+ description:
+ - Wait for operation to complete before returning.
+ required: false
+ default: false
+ wait_timeout:
+ description:
+ - How many seconds to wait for an operation to complete before timing out.
+ required: false
+ default: 300
+ client_token:
+ description:
+ - Optional unique token to be used during create to ensure idempotency.
+ When specifying this option, ensure you specify the eip_address parameter
+ as well otherwise any subsequent runs will fail.
+ required: false
+
+author:
+ - "Allen Sanabria (@linuxdynasty)"
+ - "Jon Hadfield (@jonhadfield)"
+ - "Karen Cheng(@Etherdaemon)"
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create new nat gateway with client token.
+ ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ eip_address: 52.1.1.1
+ region: ap-southeast-2
+ client_token: abcd-12345678
+ register: new_nat_gateway
+
+- name: Create new nat gateway using an allocation-id.
+ ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ allocation_id: eipalloc-12345678
+ region: ap-southeast-2
+ register: new_nat_gateway
+
+- name: Create new nat gateway, using an EIP address and wait for available status.
+ ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ eip_address: 52.1.1.1
+ wait: yes
+ region: ap-southeast-2
+ register: new_nat_gateway
+
+- name: Create new nat gateway and allocate new EIP.
+ ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ wait: yes
+ region: ap-southeast-2
+ register: new_nat_gateway
+
+- name: Create new nat gateway and allocate new EIP if a nat gateway does not yet exist in the subnet.
+ ec2_vpc_nat_gateway:
+ state: present
+ subnet_id: subnet-12345678
+ wait: yes
+ region: ap-southeast-2
+ if_exist_do_not_create: true
+ register: new_nat_gateway
+
+- name: Delete nat gateway using discovered nat gateways from facts module.
+ ec2_vpc_nat_gateway:
+ state: absent
+ region: ap-southeast-2
+ wait: yes
+ nat_gateway_id: "{{ item.NatGatewayId }}"
+ release_eip: yes
+ register: delete_nat_gateway_result
+ with_items: "{{ gateways_to_remove.result }}"
+
+- name: Delete nat gateway and wait for deleted status.
+ ec2_vpc_nat_gateway:
+ state: absent
+ nat_gateway_id: nat-12345678
+ wait: yes
+ wait_timeout: 500
+ region: ap-southeast-2
+
+- name: Delete nat gateway and release EIP.
+ ec2_vpc_nat_gateway:
+ state: absent
+ nat_gateway_id: nat-12345678
+ release_eip: yes
+ wait: yes
+ wait_timeout: 300
+ region: ap-southeast-2
+'''
+
+RETURN = '''
+create_time:
  description: The ISO 8601 date and time, in UTC.
+ returned: In all cases.
+ type: string
+ sample: "2016-03-05T05:19:20.282000+00:00'"
+nat_gateway_id:
+ description: id of the VPC NAT Gateway
+ returned: In all cases.
+ type: string
+ sample: "nat-0d1e3a878585988f8"
+subnet_id:
+ description: id of the Subnet
+ returned: In all cases.
+ type: string
+ sample: "subnet-12345"
+state:
+ description: The current state of the NAT Gateway.
+ returned: In all cases.
+ type: string
+ sample: "available"
+vpc_id:
+ description: id of the VPC.
+ returned: In all cases.
+ type: string
+ sample: "vpc-12345"
+nat_gateway_addresses:
  description: List of dictionaries containing the public_ip, network_interface_id, private_ip, and allocation_id.
+ returned: In all cases.
+ type: string
+ sample: [
+ {
+ 'public_ip': '52.52.52.52',
+ 'network_interface_id': 'eni-12345',
+ 'private_ip': '10.0.0.100',
+ 'allocation_id': 'eipalloc-12345'
+ }
+ ]
+'''
+
+try:
+ import botocore
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+import datetime
+import random
+import re
+import time
+
+from dateutil.tz import tzutc
+
+DRY_RUN_GATEWAYS = [
+ {
+ "nat_gateway_id": "nat-123456789",
+ "subnet_id": "subnet-123456789",
+ "nat_gateway_addresses": [
+ {
+ "public_ip": "55.55.55.55",
+ "network_interface_id": "eni-1234567",
+ "private_ip": "10.0.0.102",
+ "allocation_id": "eipalloc-1234567"
+ }
+ ],
+ "state": "available",
+ "create_time": "2016-03-05T05:19:20.282000+00:00",
+ "vpc_id": "vpc-12345678"
+ }
+]
+DRY_RUN_GATEWAY_UNCONVERTED = [
+ {
+ 'VpcId': 'vpc-12345678',
+ 'State': 'available',
+ 'NatGatewayId': 'nat-123456789',
+ 'SubnetId': 'subnet-123456789',
+ 'NatGatewayAddresses': [
+ {
+ 'PublicIp': '55.55.55.55',
+ 'NetworkInterfaceId': 'eni-1234567',
+ 'AllocationId': 'eipalloc-1234567',
+ 'PrivateIp': '10.0.0.102'
+ }
+ ],
+ 'CreateTime': datetime.datetime(2016, 3, 5, 5, 19, 20, 282000, tzinfo=tzutc())
+ }
+]
+
+DRY_RUN_ALLOCATION_UNCONVERTED = {
+ 'Addresses': [
+ {
+ 'PublicIp': '55.55.55.55',
+ 'Domain': 'vpc',
+ 'AllocationId': 'eipalloc-1234567'
+ }
+ ]
+}
+
+DRY_RUN_MSGS = 'DryRun Mode:'
+
+
def convert_to_lower(data):
    """Recursively convert CamelCase dict keys to snake_case.

    datetime.datetime values are rendered as ISO 8601 strings; nested
    dicts and lists are converted recursively. Non-dict input yields an
    empty dict.

    Basic Usage:
        >>> convert_to_lower({'FooBar': []})
        {'foo_bar': []}

    Returns:
        Dictionary
    """
    results = dict()
    if not isinstance(data, dict):
        return results
    for key, val in data.items():
        # Insert '_' before each run of up to three capitals, then lower.
        snake_key = re.sub(r'(([A-Z]{1,3}){1})', r'_\1', key).lower()
        if snake_key[0] == '_':
            snake_key = snake_key[1:]
        if isinstance(val, datetime.datetime):
            results[snake_key] = val.isoformat()
        elif isinstance(val, dict):
            results[snake_key] = convert_to_lower(val)
        elif isinstance(val, list):
            results[snake_key] = [convert_to_lower(item) for item in val]
        else:
            results[snake_key] = val
    return results
+
+
def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None,
                     states=None, check_mode=False):
    """Retrieve a list of NAT Gateways
    Args:
        client (botocore.client.EC2): Boto3 client

    Kwargs:
        subnet_id (str): The subnet_id the nat resides in.
        nat_gateway_id (str): The Amazon nat id.
        states (list): States available (pending, failed, available, deleting, and deleted)
            default=None
        check_mode (bool): if set to true, do not call AWS; answer from
            the DRY_RUN_GATEWAYS fixture instead.

    Basic Usage:
        >>> client = boto3.client('ec2')
        >>> subnet_id = 'subnet-12345678'
        >>> get_nat_gateways(client, subnet_id)
        [
            true,
            "",
            {
                "nat_gateway_id": "nat-123456789",
                "subnet_id": "subnet-123456789",
                "nat_gateway_addresses": [
                    {
                        "public_ip": "55.55.55.55",
                        "network_interface_id": "eni-1234567",
                        "private_ip": "10.0.0.102",
                        "allocation_id": "eipalloc-1234567"
                    }
                ],
                "state": "deleted",
                "create_time": "2016-03-05T00:33:21.209000+00:00",
                "delete_time": "2016-03-05T00:36:37.329000+00:00",
                "vpc_id": "vpc-12345678"
            }

    Returns:
        Tuple (bool, str, list)
    """
    params = dict()
    err_msg = ""
    gateways_retrieved = False
    existing_gateways = list()
    # Default to gateways that are usable or becoming usable.
    if not states:
        states = ['available', 'pending']
    if nat_gateway_id:
        params['NatGatewayIds'] = [nat_gateway_id]
    else:
        # DescribeNatGateways takes 'Filter' (singular), unlike most
        # other EC2 describe calls.
        params['Filter'] = [
            {
                'Name': 'subnet-id',
                'Values': [subnet_id]
            },
            {
                'Name': 'state',
                'Values': states
            }
        ]

    try:
        if not check_mode:
            gateways = client.describe_nat_gateways(**params)['NatGateways']
            if gateways:
                for gw in gateways:
                    existing_gateways.append(convert_to_lower(gw))
            gateways_retrieved = True
        else:
            # Dry run: serve the canned fixture when it matches the query.
            gateways_retrieved = True
            if nat_gateway_id:
                if DRY_RUN_GATEWAYS[0]['nat_gateway_id'] == nat_gateway_id:
                    existing_gateways = DRY_RUN_GATEWAYS
            elif subnet_id:
                if DRY_RUN_GATEWAYS[0]['subnet_id'] == subnet_id:
                    existing_gateways = DRY_RUN_GATEWAYS
            err_msg = '{0} Retrieving gateways'.format(DRY_RUN_MSGS)

    except botocore.exceptions.ClientError as e:
        err_msg = str(e)

    return gateways_retrieved, err_msg, existing_gateways
+
+
def wait_for_status(client, wait_timeout, nat_gateway_id, status,
                    check_mode=False):
    """Wait for the NAT Gateway to reach a status
    Args:
        client (botocore.client.EC2): Boto3 client
        wait_timeout (int): Number of seconds to wait, until this timeout is reached.
        nat_gateway_id (str): The Amazon nat id.
        status (str): The status to wait for.
            examples. status=available, status=deleted

    Kwargs:
        check_mode (bool): if set to true, do not call AWS; answer from
            the dry-run fixtures instead.

    Basic Usage:
        >>> client = boto3.client('ec2')
        >>> wait_for_status(client, 300, 'nat-123456789', 'available')
        (
            true,
            "",
            {
                "nat_gateway_id": "nat-123456789",
                "subnet_id": "subnet-1234567",
                "state": "available",
                "create_time": "2016-03-05T00:33:21.209000+00:00",
                "vpc_id": "vpc-12345677"
            }
        )

    Returns:
        Tuple (bool, str, dict)
    """
    polling_increment_secs = 5
    wait_timeout = time.time() + wait_timeout
    status_achieved = False
    nat_gateway = dict()
    states = ['pending', 'failed', 'available', 'deleting', 'deleted']
    err_msg = ""

    while wait_timeout > time.time():
        try:
            gws_retrieved, err_msg, nat_gateways = (
                get_nat_gateways(
                    client, nat_gateway_id=nat_gateway_id,
                    states=states, check_mode=check_mode
                )
            )
            if gws_retrieved and nat_gateways:
                nat_gateway = nat_gateways[0]
                if check_mode:
                    nat_gateway['state'] = status

                if nat_gateway.get('state') == status:
                    status_achieved = True
                    break

                elif nat_gateway.get('state') == 'failed':
                    err_msg = nat_gateway.get('failure_message')
                    break

                elif nat_gateway.get('state') == 'pending':
                    if 'failure_message' in nat_gateway:
                        err_msg = nat_gateway.get('failure_message')
                        status_achieved = False
                        break
                    # Still pending with no failure: back off before the
                    # next poll (previously this path busy-looped).
                    time.sleep(polling_increment_secs)

                else:
                    time.sleep(polling_increment_secs)

        except botocore.exceptions.ClientError as e:
            err_msg = str(e)

    if not status_achieved and not err_msg:
        # Only report a timeout when no more specific error was recorded;
        # previously this overwrote the gateway's failure_message.
        err_msg = "Wait time out reached, while waiting for results"

    return status_achieved, err_msg, nat_gateway
+
+
def gateway_in_subnet_exists(client, subnet_id, allocation_id=None,
                             check_mode=False):
    """Retrieve all NAT Gateways for a subnet.
    Args:
        client (botocore.client.EC2): Boto3 client
        subnet_id (str): The subnet_id the nat resides in.

    Kwargs:
        allocation_id (str): The EIP Amazon identifier.
            default = None
        check_mode (bool): if set to true, do not call AWS; answer from
            the dry-run fixtures instead.

    Basic Usage:
        >>> client = boto3.client('ec2')
        >>> subnet_id = 'subnet-1234567'
        >>> allocation_id = 'eipalloc-1234567'
        >>> gateway_in_subnet_exists(client, subnet_id, allocation_id)
        (
            [
                {
                    "nat_gateway_id": "nat-123456789",
                    "subnet_id": "subnet-123456789",
                    "nat_gateway_addresses": [
                        {
                            "public_ip": "55.55.55.55",
                            "network_interface_id": "eni-1234567",
                            "private_ip": "10.0.0.102",
                            "allocation_id": "eipalloc-1234567"
                        }
                    ],
                    "state": "deleted",
                    "create_time": "2016-03-05T00:33:21.209000+00:00",
                    "delete_time": "2016-03-05T00:36:37.329000+00:00",
                    "vpc_id": "vpc-1234567"
                }
            ],
            False
        )

    Returns:
        Tuple (list, bool): gateways in the subnet, and whether one of
        them already uses allocation_id.
    """
    allocation_id_exists = False
    gateways = []
    # Only consider gateways that are usable or becoming usable.
    states = ['available', 'pending']
    gws_retrieved, _, gws = (
        get_nat_gateways(
            client, subnet_id, states=states, check_mode=check_mode
        )
    )
    if not gws_retrieved:
        return gateways, allocation_id_exists
    for gw in gws:
        for address in gw['nat_gateway_addresses']:
            if allocation_id:
                # Match on EIP: record the gateway only for its matching
                # address and remember that the allocation is in use.
                if address.get('allocation_id') == allocation_id:
                    allocation_id_exists = True
                    gateways.append(gw)
            else:
                gateways.append(gw)

    return gateways, allocation_id_exists
+
+
def get_eip_allocation_id_by_address(client, eip_address, check_mode=False):
    """Look up the allocation id of an existing VPC-scoped EIP.

    (The previous docstring said "Release an EIP from your EIP Pool" --
    a copy-paste from release_address; this function only reads.)

    Args:
        client (botocore.client.EC2): Boto3 client
        eip_address (str): The Elastic IP Address of the EIP.

    Kwargs:
        check_mode (bool): if set to true, do not run anything and
            falsify the results.

    Basic Usage:
        >>> client = boto3.client('ec2')
        >>> eip_address = '52.87.29.36'
        >>> get_eip_allocation_id_by_address(client, eip_address)
        ('eipalloc-36014da3', '')

    Returns:
        Tuple (str or None, str): (allocation_id, error message).
        allocation_id is None when the address does not exist or is not
        VPC-scoped.
    """
    params = {
        'PublicIps': [eip_address],
    }
    allocation_id = None
    err_msg = ""
    try:
        if not check_mode:
            allocations = client.describe_addresses(**params)['Addresses']
            if len(allocations) == 1:
                allocation = allocations[0]
            else:
                allocation = None
        else:
            # Dry run: answer from the canned fixture.
            dry_run_eip = (
                DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]['PublicIp']
            )
            if dry_run_eip == eip_address:
                allocation = DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]
            else:
                allocation = None
        if allocation:
            if allocation.get('Domain') != 'vpc':
                err_msg = (
                    "EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP"
                    .format(eip_address)
                )
            else:
                allocation_id = allocation.get('AllocationId')
        else:
            err_msg = (
                "EIP {0} does not exist".format(eip_address)
            )

    except botocore.exceptions.ClientError as e:
        err_msg = str(e)

    return allocation_id, err_msg
+
+
def allocate_eip_address(client, check_mode=False):
    """Allocate a new VPC-scoped EIP.

    (The previous docstring said "Release an EIP from your EIP Pool" and
    claimed a 2-tuple return; this function allocates and returns three
    values.)

    Args:
        client (botocore.client.EC2): Boto3 client

    Kwargs:
        check_mode (bool): if set to true, do not run anything and
            falsify the results.

    Basic Usage:
        >>> client = boto3.client('ec2')
        >>> allocate_eip_address(client)
        (True, 'eipalloc id eipalloc-1234567 created', 'eipalloc-1234567')

    Returns:
        Tuple (bool, str, str): (allocated, message, allocation id).
    """
    ip_allocated = False
    new_eip = None
    err_msg = ''
    params = {
        'Domain': 'vpc',
    }
    try:
        if check_mode:
            ip_allocated = True
            # Fabricate a plausible allocation id for dry runs.
            random_numbers = (
                ''.join(str(x) for x in random.sample(range(0, 9), 7))
            )
            new_eip = 'eipalloc-{0}'.format(random_numbers)
        else:
            new_eip = client.allocate_address(**params)['AllocationId']
            ip_allocated = True
        err_msg = 'eipalloc id {0} created'.format(new_eip)

    except botocore.exceptions.ClientError as e:
        err_msg = str(e)

    return ip_allocated, err_msg, new_eip
+
+
def release_address(client, allocation_id, check_mode=False):
    """Release an EIP from your EIP Pool
    Args:
        client (botocore.client.EC2): Boto3 client
        allocation_id (str): The eip Amazon identifier.

    Kwargs:
        check_mode (bool): if set to true, do not run anything and
            falsify the results.

    Basic Usage:
        >>> client = boto3.client('ec2')
        >>> allocation_id = "eipalloc-123456"
        >>> release_address(client, allocation_id)
        (True, '')

    Returns:
        Boolean, string
    """
    if check_mode:
        # Dry run: report success without touching AWS.
        return True, ''

    try:
        client.release_address(AllocationId=allocation_id)
    except botocore.exceptions.ClientError as e:
        return False, str(e)
    return True, ''
+
+
def create(client, subnet_id, allocation_id, client_token=None,
           wait=False, wait_timeout=0, if_exist_do_not_create=False,
           check_mode=False):
    """Create an Amazon NAT Gateway.
    Args:
        client (botocore.client.EC2): Boto3 client
        subnet_id (str): The subnet_id the nat resides in.
        allocation_id (str): The eip Amazon identifier.

    Kwargs:
        if_exist_do_not_create (bool): retained for interface compatibility;
            existence checking is performed by pre_create().
            default = False
        wait (bool): Wait for the nat to be in the available state before
            returning.
            default = False
        wait_timeout (int): Number of seconds to wait, until this timeout is reached.
            default = 0
        client_token (str): Idempotency token passed to AWS.
            default = None
        check_mode (bool): fabricate a dry-run result instead of calling AWS.
            default = False

    Basic Usage:
        >>> client = boto3.client('ec2')
        >>> create(client, 'subnet-1234567', 'eipalloc-1234567', wait=True, wait_timeout=500)

    Returns:
        Tuple (bool, bool, str, dict): success, changed, message, gateway
    """
    params = {
        'SubnetId': subnet_id,
        'AllocationId': allocation_id
    }
    request_time = datetime.datetime.utcnow()
    changed = False
    success = False
    token_provided = False
    err_msg = ""
    result = None

    if client_token:
        token_provided = True
        params['ClientToken'] = client_token

    try:
        if not check_mode:
            result = client.create_nat_gateway(**params)["NatGateway"]
        else:
            result = DRY_RUN_GATEWAY_UNCONVERTED[0]
            result['CreateTime'] = datetime.datetime.utcnow()
            result['NatGatewayAddresses'][0]['AllocationId'] = allocation_id
            result['SubnetId'] = subnet_id

        success = True
        changed = True
        create_time = result['CreateTime'].replace(tzinfo=None)
        # If AWS matched a previous request through the idempotency token,
        # the gateway predates this call and nothing was actually created.
        if token_provided and (request_time > create_time):
            changed = False
        elif wait:
            success, err_msg, result = (
                wait_for_status(
                    client, wait_timeout, result['NatGatewayId'], 'available',
                    check_mode=check_mode
                )
            )
            if success:
                err_msg = (
                    'NAT gateway {0} created'.format(result['nat_gateway_id'])
                )

    except botocore.exceptions.ClientError as e:
        # Bug fix: exceptions have no .message attribute on Python 3;
        # str(e) carries the same text portably.
        if "IdempotentParameterMismatch" in str(e):
            err_msg = (
                'NAT Gateway does not support update and token has already been provided'
            )
        else:
            err_msg = str(e)
        success = False
        changed = False
        result = None

    return success, changed, err_msg, result
+
+
def pre_create(client, subnet_id, allocation_id=None, eip_address=None,
               if_exist_do_not_create=False, wait=False, wait_timeout=0,
               client_token=None, check_mode=False):
    """Create an Amazon NAT Gateway, reusing an existing one where allowed.
    Args:
        client (botocore.client.EC2): Boto3 client
        subnet_id (str): The subnet_id the nat resides in.

    Kwargs:
        allocation_id (str): The EIP Amazon identifier.
            default = None
        eip_address (str): The Elastic IP Address of the EIP.
            default = None
        if_exist_do_not_create (bool): if a nat gateway already exists in this
            subnet, then do not create another one.
            default = False
        wait (bool): Wait for the nat to be in the available state before
            returning.
            default = False
        wait_timeout (int): Number of seconds to wait, until this timeout is reached.
            default = 0
        client_token (str): Idempotency token passed through to create().
            default = None

    Returns:
        Tuple (bool, bool, str, dict): success, changed, message, gateway
    """
    success = False
    changed = False
    err_msg = ""
    results = list()

    if not allocation_id and not eip_address:
        # No EIP specified: reuse any gateway already in the subnet when
        # permitted, otherwise allocate a fresh EIP for the new gateway.
        existing_gateways, allocation_id_exists = (
            gateway_in_subnet_exists(client, subnet_id, check_mode=check_mode)
        )

        if len(existing_gateways) > 0 and if_exist_do_not_create:
            success = True
            changed = False
            results = existing_gateways[0]
            err_msg = (
                'NAT Gateway {0} already exists in subnet_id {1}'
                .format(
                    existing_gateways[0]['nat_gateway_id'], subnet_id
                )
            )
            return success, changed, err_msg, results
        else:
            success, err_msg, allocation_id = (
                allocate_eip_address(client, check_mode=check_mode)
            )
            if not success:
                # Bug fix: 'changed' used to be returned as the string
                # 'False', which is truthy; return the boolean.
                return success, False, err_msg, dict()

    elif eip_address or allocation_id:
        if eip_address and not allocation_id:
            allocation_id, err_msg = (
                get_eip_allocation_id_by_address(
                    client, eip_address, check_mode=check_mode
                )
            )
            if not allocation_id:
                success = False
                changed = False
                return success, changed, err_msg, dict()

        existing_gateways, allocation_id_exists = (
            gateway_in_subnet_exists(
                client, subnet_id, allocation_id, check_mode=check_mode
            )
        )
        # Reuse the gateway when it already holds this EIP, or when the
        # caller asked not to create duplicates in the subnet.
        if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create):
            success = True
            changed = False
            results = existing_gateways[0]
            err_msg = (
                'NAT Gateway {0} already exists in subnet_id {1}'
                .format(
                    existing_gateways[0]['nat_gateway_id'], subnet_id
                )
            )
            return success, changed, err_msg, results

    success, changed, err_msg, results = create(
        client, subnet_id, allocation_id, client_token,
        wait, wait_timeout, if_exist_do_not_create, check_mode=check_mode
    )

    return success, changed, err_msg, results
+
+
def remove(client, nat_gateway_id, wait=False, wait_timeout=0,
           release_eip=False, check_mode=False):
    """Delete an Amazon NAT Gateway.
    Args:
        client (botocore.client.EC2): Boto3 client
        nat_gateway_id (str): The Amazon nat id.

    Kwargs:
        wait (bool): Wait for the nat to be in the deleted state before returning.
        wait_timeout (int): Number of seconds to wait, until this timeout is reached.
        release_eip (bool): Once the nat has been deleted, you can deallocate the eip from the vpc.
        check_mode (bool): do not actually delete; report what would happen.

    Basic Usage:
        >>> client = boto3.client('ec2')
        >>> nat_gw_id = 'nat-03835afb6e31df79b'
        >>> remove(client, nat_gw_id, wait=True, wait_timeout=500, release_eip=True)

    Returns:
        Tuple (bool, bool, str, dict): success, changed, message, gateway
    """
    params = {
        'NatGatewayId': nat_gateway_id
    }
    success = False
    changed = False
    err_msg = ""
    results = list()
    states = ['pending', 'available']
    # Bug fix: allocation_id must be pre-initialised, otherwise the
    # release_eip branch below raised NameError whenever the gateway was
    # not found (or the lookup raised).
    allocation_id = None
    try:
        exist, _, gw = (
            get_nat_gateways(
                client, nat_gateway_id=nat_gateway_id,
                states=states, check_mode=check_mode
            )
        )
        if exist and len(gw) == 1:
            results = gw[0]
            if not check_mode:
                client.delete_nat_gateway(**params)

            # Remember the EIP so it can optionally be released below.
            allocation_id = (
                results['nat_gateway_addresses'][0]['allocation_id']
            )
            changed = True
            success = True
            err_msg = (
                'NAT gateway {0} is in a deleting state. Delete was successfull'
                .format(nat_gateway_id)
            )

            if wait:
                status_achieved, err_msg, results = (
                    wait_for_status(
                        client, wait_timeout, nat_gateway_id, 'deleted',
                        check_mode=check_mode
                    )
                )
                if status_achieved:
                    err_msg = (
                        'NAT gateway {0} was deleted successfully'
                        .format(nat_gateway_id)
                    )

    except botocore.exceptions.ClientError as e:
        err_msg = str(e)

    # Only attempt the release when we actually discovered an EIP.
    if release_eip and allocation_id:
        eip_released, eip_err = (
            release_address(client, allocation_id, check_mode)
        )
        if not eip_released:
            err_msg = (
                "{0}: Failed to release EIP {1}: {2}"
                .format(err_msg, allocation_id, eip_err)
            )
            success = False

    return success, changed, err_msg, results
+
+
def main():
    """Entry point: create or remove an AWS NAT gateway per the task params."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        subnet_id=dict(type='str'),
        eip_address=dict(type='str'),
        allocation_id=dict(type='str'),
        if_exist_do_not_create=dict(type='bool', default=False),
        state=dict(default='present', choices=['present', 'absent']),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=320, required=False),
        release_eip=dict(type='bool', default=False),
        nat_gateway_id=dict(type='str'),
        client_token=dict(type='str'),
    )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['allocation_id', 'eip_address']
        ]
    )

    # Validate Requirements
    if not HAS_BOTO3:
        module.fail_json(msg='botocore/boto3 is required.')

    state = module.params.get('state').lower()
    check_mode = module.check_mode
    subnet_id = module.params.get('subnet_id')
    allocation_id = module.params.get('allocation_id')
    eip_address = module.params.get('eip_address')
    nat_gateway_id = module.params.get('nat_gateway_id')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    release_eip = module.params.get('release_eip')
    client_token = module.params.get('client_token')
    if_exist_do_not_create = module.params.get('if_exist_do_not_create')

    try:
        region, ec2_url, aws_connect_kwargs = (
            get_aws_connection_info(module, boto3=True)
        )
        client = (
            boto3_conn(
                module, conn_type='client', resource='ec2',
                region=region, endpoint=ec2_url, **aws_connect_kwargs
            )
        )
    except botocore.exceptions.ClientError as e:
        # Bug fix: ClientError has no .msg attribute; str(e) is the
        # portable way to extract the error message.
        module.fail_json(msg="Boto3 Client Error - " + str(e))

    changed = False
    err_msg = ''

    if state == 'present':
        if not subnet_id:
            module.fail_json(msg='subnet_id is required for creation')

        success, changed, err_msg, results = (
            pre_create(
                client, subnet_id, allocation_id, eip_address,
                if_exist_do_not_create, wait, wait_timeout,
                client_token, check_mode=check_mode
            )
        )
    else:
        if not nat_gateway_id:
            module.fail_json(msg='nat_gateway_id is required for removal')

        success, changed, err_msg, results = (
            remove(
                client, nat_gateway_id, wait, wait_timeout, release_eip,
                check_mode=check_mode
            )
        )

    if not success:
        module.fail_json(
            msg=err_msg, success=success, changed=changed
        )
    else:
        module.exit_json(
            msg=err_msg, success=success, changed=changed, **results
        )
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_net_facts.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_net_facts.py
new file mode 100644
index 0000000000..14e1c4920f
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_net_facts.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_net_facts
+short_description: Gather facts about ec2 VPCs in AWS
+description:
+ - Gather facts about ec2 VPCs in AWS
+version_added: "2.1"
+author: "Rob White (@wimnat)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters.
+ required: false
+ default: null
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all VPCs
+- ec2_vpc_net_facts:
+
+# Gather facts about a particular VPC using VPC ID
+- ec2_vpc_net_facts:
+ filters:
+ vpc-id: vpc-00112233
+
+# Gather facts about any VPC with a tag key Name and value Example
+- ec2_vpc_net_facts:
+ filters:
+ "tag:Name": Example
+
+'''
+
+try:
+ import boto.vpc
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info
+
+
def get_vpc_info(vpc):
    """Convert a boto VPC object into a plain dict of facts."""
    # ClassicLink is not exposed by every boto version/region; fall back
    # to False when the attribute is missing.
    classic_link = getattr(vpc, 'classic_link_enabled', False)

    return {
        'id': vpc.id,
        'instance_tenancy': vpc.instance_tenancy,
        'classic_link_enabled': classic_link,
        'dhcp_options_id': vpc.dhcp_options_id,
        'state': vpc.state,
        'is_default': vpc.is_default,
        'cidr_block': vpc.cidr_block,
        'tags': vpc.tags,
    }
+
def list_ec2_vpcs(connection, module):
    """Query all VPCs matching the 'filters' module param and exit with facts."""
    filters = module.params.get("filters")

    try:
        matching = connection.get_all_vpcs(filters=filters)
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    module.exit_json(vpcs=[get_vpc_info(vpc) for vpc in matching])
+
+
def main():
    """Entry point: gather EC2 VPC facts and exit with the results."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            filters = dict(default=None, type='dict')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        # Bug fix: StandardError does not exist on Python 3 and raised a
        # NameError whenever this except clause was evaluated. Exception
        # also covers boto.exception.NoAuthHandlerFound.
        except Exception as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    list_ec2_vpcs(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_peer.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_peer.py
new file mode 100644
index 0000000000..6615ba38a2
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_peer.py
@@ -0,0 +1,367 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: ec2_vpc_peer
+short_description: create, delete, accept, and reject VPC peering connections between two VPCs.
+description:
+ - Read the AWS documentation for VPC Peering Connections
+ U(http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-peering.html)
+version_added: "2.2"
+options:
+ vpc_id:
+ description:
+ - VPC id of the requesting VPC.
+ required: false
+ peer_vpc_id:
+ description:
+ - VPC id of the accepting VPC.
+ required: false
+ peer_owner_id:
+ description:
+ - The AWS account number for cross account peering.
+ required: false
+ tags:
+ description:
+ - Dictionary of tags to look for and apply when creating a Peering Connection.
+ required: false
+ state:
+ description:
+ - Create, delete, accept, reject a peering connection.
+ required: false
+ default: present
+ choices: ['present', 'absent', 'accept', 'reject']
+author: Mike Mochan(@mmochan)
+extends_documentation_fragment: aws
+requirements: [ botocore, boto3, json ]
+'''
+
+EXAMPLES = '''
+# Complete example to create and accept a local peering connection.
+- name: Create local account VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-87654321
+ state: present
+ tags:
+      Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Accept local VPC peering request
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ state: accept
+ register: action_peer
+
+# Complete example to delete a local peering connection.
+- name: Create local account VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-87654321
+ state: present
+ tags:
+      Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: delete a local VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ state: absent
+ register: vpc_peer
+
+ # Complete example to create and accept a cross account peering connection.
+- name: Create cross account VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-12345678
+ peer_owner_id: 123456789102
+ state: present
+ tags:
+      Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Accept peering connection from remote account
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ profile: bot03_profile_for_cross_account
+ state: accept
+ register: vpc_peer
+
+# Complete example to create and reject a local peering connection.
+- name: Create local account VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-87654321
+ state: present
+ tags:
+      Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Reject a local VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ state: reject
+
+# Complete example to create and accept a cross account peering connection.
+- name: Create cross account VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-12345678
+ peer_owner_id: 123456789102
+ state: present
+ tags:
+      Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Accept a cross account VPC peering connection request
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ profile: bot03_profile_for_cross_account
+ state: accept
+ tags:
+      Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+
+# Complete example to create and reject a cross account peering connection.
+- name: Create cross account VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ vpc_id: vpc-12345678
+ peer_vpc_id: vpc-12345678
+ peer_owner_id: 123456789102
+ state: present
+ tags:
+      Name: Peering connection for VPC 21 to VPC 22
+ CostCode: CC1234
+ Project: phoenix
+ register: vpc_peer
+
+- name: Reject a cross account VPC peering Connection
+ ec2_vpc_peer:
+ region: ap-southeast-2
+ peering_id: "{{ vpc_peer.peering_id }}"
+ profile: bot03_profile_for_cross_account
+ state: reject
+
+'''
+RETURN = '''
+task:
+ description: The result of the create, accept, reject or delete action.
+ returned: success
+ type: dictionary
+'''
+
+try:
+ import json
+ import botocore
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
def tags_changed(pcx_id, client, module):
    """Reconcile tags on the peering connection with the 'tags' module param.

    Flattens both the current and the desired tags into sorted key/value
    lists; when they differ the tags are rewritten and True is returned.
    """
    changed = False
    tags = dict()
    if module.params.get('tags'):
        tags = module.params.get('tags')
    pcx = find_pcx_by_id(pcx_id, client, module)
    if pcx['VpcPeeringConnections']:
        pcx_values = [t.values() for t in pcx['VpcPeeringConnections'][0]['Tags']]
        pcx_tags = [item for sublist in pcx_values for item in sublist]
        # Bug fix: .items() replaces the Python 2-only .iteritems().
        tag_values = [[key, str(value)] for key, value in tags.items()]
        tags = [item for sublist in tag_values for item in sublist]
        if sorted(pcx_tags) == sorted(tags):
            changed = False
            return changed
        else:
            delete_tags(pcx_id, client, module)
            create_tags(pcx_id, client, module)
            changed = True
            return changed
    return changed
+
+
def describe_peering_connections(params, client):
    """Find a peering connection between the two VPCs, trying both directions."""
    def _lookup(requester, accepter):
        # One describe call with requester/accepter fixed to the given order.
        return client.describe_vpc_peering_connections(Filters=[
            {'Name': 'requester-vpc-info.vpc-id', 'Values': [requester]},
            {'Name': 'accepter-vpc-info.vpc-id', 'Values': [accepter]}
        ])

    result = _lookup(params['VpcId'], params['PeerVpcId'])
    if not result['VpcPeeringConnections']:
        # Nothing found; the peering may have been requested the other way.
        result = _lookup(params['PeerVpcId'], params['VpcId'])
    return result
+
+
def is_active(peering_conn):
    """Return True when the peering connection is in the 'active' state."""
    status_code = peering_conn['Status']['Code']
    return status_code == 'active'
+
+
def is_pending(peering_conn):
    """Return True when the peering connection awaits acceptance."""
    status_code = peering_conn['Status']['Code']
    return status_code == 'pending-acceptance'
+
+
def create_peer_connection(client, module):
    """Create (or reuse) a VPC peering connection between vpc_id and peer_vpc_id.

    Returns a tuple (changed, peering_connection_id). If a connection
    between the two VPCs already exists in the 'active' or
    'pending-acceptance' state it is reused and only its tags are
    reconciled; otherwise a new connection is requested.
    """
    changed = False
    params = dict()
    params['VpcId'] = module.params.get('vpc_id')
    params['PeerVpcId'] = module.params.get('peer_vpc_id')
    if module.params.get('peer_owner_id'):
        # The API expects the account id as a string.
        params['PeerOwnerId'] = str(module.params.get('peer_owner_id'))
    params['DryRun'] = module.check_mode
    peering_conns = describe_peering_connections(params, client)
    for peering_conn in peering_conns['VpcPeeringConnections']:
        pcx_id = peering_conn['VpcPeeringConnectionId']
        # A tag-only update still counts as a change.
        if tags_changed(pcx_id, client, module):
            changed = True
        if is_active(peering_conn):
            return (changed, peering_conn['VpcPeeringConnectionId'])
        if is_pending(peering_conn):
            return (changed, peering_conn['VpcPeeringConnectionId'])
    try:
        peering_conn = client.create_vpc_peering_connection(**params)
        pcx_id = peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId']
        if module.params.get('tags'):
            create_tags(pcx_id, client, module)
        changed = True
        return (changed, peering_conn['VpcPeeringConnection']['VpcPeeringConnectionId'])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
+
+
def peer_status(client, module):
    """Return the status code of the connection named by the peering_id param."""
    peering_id = module.params.get('peering_id')
    response = client.describe_vpc_peering_connections(
        VpcPeeringConnectionIds=[peering_id]
    )
    return response['VpcPeeringConnections'][0]['Status']['Code']
+
+
def accept_reject_delete(state, client, module):
    """Accept, reject or delete the peering connection given by peering_id.

    Returns a tuple (changed, peering_connection_id).
    """
    changed = False
    params = dict()
    params['VpcPeeringConnectionId'] = module.params.get('peering_id')
    params['DryRun'] = module.check_mode
    # Map the requested state onto the matching boto3 client call.
    invocations = {
        'accept': client.accept_vpc_peering_connection,
        'reject': client.reject_vpc_peering_connection,
        'absent': client.delete_vpc_peering_connection
    }
    # Deletion is always attempted; accept/reject only make sense while
    # the connection is not already active.
    if state == 'absent' or peer_status(client, module) != 'active':
        try:
            invocations[state](**params)
            if module.params.get('tags'):
                create_tags(params['VpcPeeringConnectionId'], client, module)
            changed = True
        except botocore.exceptions.ClientError as e:
            module.fail_json(msg=str(e))
    # A tag-only update still counts as a change.
    if tags_changed(params['VpcPeeringConnectionId'], client, module):
        changed = True
    return changed, params['VpcPeeringConnectionId']
+
+
def load_tags(module):
    """Build a boto3-style Tags list from the module's 'tags' parameter.

    Returns:
        list of {'Key': ..., 'Value': ...} dicts; values are stringified.
        Empty list when no tags were supplied.
    """
    tags = []
    if module.params.get('tags'):
        # Bug fix: .items() replaces the Python 2-only .iteritems().
        for name, value in module.params.get('tags').items():
            tags.append({'Key': name, 'Value': str(value)})
    return tags
+
+
def create_tags(pcx_id, client, module):
    """Replace all tags on the peering connection with the task's 'tags' dict.

    Existing tags are removed first so stale keys do not linger.
    """
    try:
        delete_tags(pcx_id, client, module)
        client.create_tags(Resources=[pcx_id], Tags=load_tags(module))
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
+
+
def delete_tags(pcx_id, client, module):
    """Delete all tags from the given peering connection."""
    try:
        client.delete_tags(Resources=[pcx_id])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
+
+
def find_pcx_by_id(pcx_id, client, module):
    """Describe the peering connection with the given id (raw boto3 response)."""
    try:
        return client.describe_vpc_peering_connections(VpcPeeringConnectionIds=[pcx_id])
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg=str(e))
+
+
def main():
    """Entry point: dispatch to create or accept/reject/delete handlers."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        vpc_id=dict(),
        peer_vpc_id=dict(),
        peering_id=dict(),
        peer_owner_id=dict(),
        tags=dict(required=False, type='dict'),
        profile=dict(),
        state=dict(default='present', choices=['present', 'absent', 'accept', 'reject'])
    )
    )
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='json, botocore and boto3 are required.')
    state = module.params.get('state').lower()
    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError as e:
        module.fail_json(msg="Can't authorize connection - "+str(e))

    # 'present' drives creation; every other state operates on an
    # existing connection identified by peering_id.
    if state == 'present':
        (changed, results) = create_peer_connection(client, module)
        module.exit_json(changed=changed, peering_id=results)
    else:
        (changed, results) = accept_reject_delete(state, client, module)
        module.exit_json(changed=changed, peering_id=results)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_route_table.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_route_table.py
new file mode 100644
index 0000000000..1529d92353
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_route_table.py
@@ -0,0 +1,637 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_route_table
+short_description: Manage route tables for AWS virtual private clouds
+description:
+ - Manage route tables for AWS virtual private clouds
+version_added: "2.0"
+author: Robert Estelle (@erydo), Rob White (@wimnat)
+options:
+ lookup:
+ description:
+ - "Look up route table by either tags or by route table ID. Non-unique tag lookup will fail. If no tags are specified then no lookup for an existing route table is performed and a new route table will be created. To change tags of a route table, you must look up by id."
+ required: false
+ default: tag
+ choices: [ 'tag', 'id' ]
+ propagating_vgw_ids:
+ description:
+ - "Enable route propagation from virtual gateways specified by ID."
+ default: None
+ required: false
+ route_table_id:
+ description:
+ - "The ID of the route table to update or delete."
+ required: false
+ default: null
+ routes:
+ description:
+ - "List of routes in the route table.
+ Routes are specified as dicts containing the keys 'dest' and one of 'gateway_id',
+ 'instance_id', 'interface_id', or 'vpc_peering_connection_id'.
+ If 'gateway_id' is specified, you can refer to the VPC's IGW by using the value 'igw'. Routes are required for present states."
+ required: false
+ default: None
+ state:
+ description:
+ - "Create or destroy the VPC route table"
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ subnets:
+ description:
+ - "An array of subnets to add to this route table. Subnets may be specified by either subnet ID, Name tag, or by a CIDR such as '10.0.0.0/24'."
+ required: true
+ tags:
+ description:
+ - "A dictionary of resource tags of the form: { tag1: value1, tag2: value2 }. Tags are used to uniquely identify route tables within a VPC when the route_table_id is not supplied."
+ required: false
+ default: null
+ aliases: [ "resource_tags" ]
+ vpc_id:
+ description:
+ - "VPC ID of the VPC in which to create the route table."
+ required: true
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic creation example:
+- name: Set up public subnet route table
+ ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ region: us-west-1
+ tags:
+ Name: Public
+ subnets:
+ - "{{ jumpbox_subnet.subnet.id }}"
+ - "{{ frontend_subnet.subnet.id }}"
+ - "{{ vpn_subnet.subnet_id }}"
+ routes:
+ - dest: 0.0.0.0/0
+ gateway_id: "{{ igw.gateway_id }}"
+ register: public_route_table
+
+- name: Set up NAT-protected route table
+ ec2_vpc_route_table:
+ vpc_id: vpc-1245678
+ region: us-west-1
+ tags:
+ Name: Internal
+ subnets:
+ - "{{ application_subnet.subnet.id }}"
+ - 'Database Subnet'
+ - '10.0.0.0/8'
+ routes:
+ - dest: 0.0.0.0/0
+ instance_id: "{{ nat.instance_id }}"
+ register: nat_route_table
+
+'''
+
+import re
+
+try:
+ import boto.ec2
+ import boto.vpc
+ from boto.exception import EC2ResponseError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+ if __name__ != '__main__':
+ raise
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
+
+
class AnsibleRouteTableException(Exception):
    # Base class for this module's errors; main() catches this and
    # converts it into module.fail_json().
    pass


class AnsibleIgwSearchException(AnsibleRouteTableException):
    # Raised when a VPC has zero, or more than one, internet gateway.
    pass


class AnsibleTagCreationException(AnsibleRouteTableException):
    # Raised when tags cannot be created/updated on a route table.
    pass


class AnsibleSubnetSearchException(AnsibleRouteTableException):
    # Raised when a subnet id/CIDR/Name lookup fails or is ambiguous.
    pass
+
# Patterns used to classify user-supplied subnet/route-table identifiers.
# The original character class `[A-z]` is a common mistake: it spans the
# ASCII range between 'A' and 'z' and therefore also matches the
# punctuation characters '[', '\', ']', '^', '_' and '`'.  EC2 resource
# ids only contain alphanumerics, so restrict the class accordingly.
CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}\/\d{1,2}$')
SUBNET_RE = re.compile(r'^subnet-[A-Za-z0-9]+$')
ROUTE_TABLE_RE = re.compile(r'^rtb-[A-Za-z0-9]+$')
+
+
def find_subnets(vpc_conn, vpc_id, identified_subnets):
    """
    Finds a list of subnets, each identified either by a raw ID, a unique
    'Name' tag, or a CIDR such as 10.0.0.0/8.

    Raises AnsibleSubnetSearchException when any identifier cannot be
    resolved, or when a Name tag matches more than one subnet.

    Note that this function is duplicated in other ec2 modules, and should
    potentially be moved into a shared module_utils
    """
    # Classify each identifier by shape: 'subnet-...' is an id,
    # 'x.x.x.x/nn' is a CIDR, anything else is treated as a Name tag.
    subnet_ids = []
    subnet_names = []
    subnet_cidrs = []
    for subnet in (identified_subnets or []):
        if re.match(SUBNET_RE, subnet):
            subnet_ids.append(subnet)
        elif re.match(CIDR_RE, subnet):
            subnet_cidrs.append(subnet)
        else:
            subnet_names.append(subnet)

    subnets_by_id = []
    if subnet_ids:
        subnets_by_id = vpc_conn.get_all_subnets(
            subnet_ids, filters={'vpc_id': vpc_id})

        # Every requested id must have been returned by EC2.
        for subnet_id in subnet_ids:
            if not any(s.id == subnet_id for s in subnets_by_id):
                raise AnsibleSubnetSearchException(
                    'Subnet ID "{0}" does not exist'.format(subnet_id))

    subnets_by_cidr = []
    if subnet_cidrs:
        subnets_by_cidr = vpc_conn.get_all_subnets(
            filters={'vpc_id': vpc_id, 'cidr': subnet_cidrs})

        # Every requested CIDR must have been returned by EC2.
        for cidr in subnet_cidrs:
            if not any(s.cidr_block == cidr for s in subnets_by_cidr):
                raise AnsibleSubnetSearchException(
                    'Subnet CIDR "{0}" does not exist'.format(cidr))

    subnets_by_name = []
    if subnet_names:
        subnets_by_name = vpc_conn.get_all_subnets(
            filters={'vpc_id': vpc_id, 'tag:Name': subnet_names})

        # Each Name tag must resolve to exactly one subnet.
        for name in subnet_names:
            matching_count = len([1 for s in subnets_by_name if s.tags.get('Name') == name])
            if matching_count == 0:
                raise AnsibleSubnetSearchException(
                    'Subnet named "{0}" does not exist'.format(name))
            elif matching_count > 1:
                raise AnsibleSubnetSearchException(
                    'Multiple subnets named "{0}"'.format(name))

    return subnets_by_id + subnets_by_cidr + subnets_by_name
+
+
def find_igw(vpc_conn, vpc_id):
    """
    Return the ID of the single Internet gateway attached to vpc_id.

    Raises AnsibleIgwSearchException when the VPC has no IGW attached,
    or when more than one IGW is attached.

    Note that this function is duplicated in other ec2 modules, and should
    potentially be moved into a shared module_utils
    """
    igws = vpc_conn.get_all_internet_gateways(
        filters={'attachment.vpc-id': vpc_id})

    if len(igws) == 1:
        return igws[0].id
    if not igws:
        raise AnsibleIgwSearchException('No IGW found for VPC {0}'.
                                        format(vpc_id))
    raise AnsibleIgwSearchException('Multiple IGWs found for VPC {0}'.
                                    format(vpc_id))
+
+
def get_resource_tags(vpc_conn, resource_id):
    """Return the tags on resource_id as a plain {name: value} dict."""
    result = {}
    for tag in vpc_conn.get_all_tags(filters={'resource-id': resource_id}):
        result[tag.name] = tag.value
    return result
+
+
def tags_match(match_tags, candidate_tags):
    """Return True when every key/value pair of match_tags is present in
    candidate_tags (candidate_tags may carry extra tags).

    Uses dict.items() rather than the Python-2-only dict.iteritems(), so
    the function also works under Python 3; behaviour is unchanged on
    Python 2.
    """
    return all((k in candidate_tags and candidate_tags[k] == v
                for k, v in match_tags.items()))
+
+
def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode):
    """Make the tags on resource_id match `tags`.

    When add_only is True, existing tags absent from `tags` are kept;
    otherwise they are deleted.  Returns {'changed': bool, 'tags': dict}.
    Raises AnsibleTagCreationException on any EC2 error.

    NOTE(review): keys present in both dicts but with different values
    are neither deleted nor re-created here, so changed values are not
    updated -- confirm whether that is intentional.
    """
    try:
        cur_tags = get_resource_tags(vpc_conn, resource_id)
        if tags == cur_tags:
            return {'changed': False, 'tags': cur_tags}

        # Tags on the resource that were not requested.
        to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
        if to_delete and not add_only:
            vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode)

        # Requested tags that do not exist on the resource yet.
        to_add = dict((k, tags[k]) for k in tags if k not in cur_tags)
        if to_add:
            vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode)

        # Re-read so the caller sees what is actually on the resource.
        latest_tags = get_resource_tags(vpc_conn, resource_id)
        return {'changed': True, 'tags': latest_tags}
    except EC2ResponseError as e:
        raise AnsibleTagCreationException(
            'Unable to update tags for {0}, error: {1}'.format(resource_id, e))
+
+
def get_route_table_by_id(vpc_conn, vpc_id, route_table_id):
    """Return the route table with route_table_id inside vpc_id, or None."""
    matches = vpc_conn.get_all_route_tables(route_table_ids=[route_table_id],
                                            filters={'vpc_id': vpc_id})
    return matches[0] if matches else None
+
def get_route_table_by_tags(vpc_conn, vpc_id, tags):
    """Return the single route table in vpc_id whose tags contain every
    pair in `tags`, or None when nothing matches.

    Raises RuntimeError when more than one table matches -- callers turn
    that into a module failure.
    """

    count = 0
    route_table = None
    route_tables = vpc_conn.get_all_route_tables(filters={'vpc_id': vpc_id})
    for table in route_tables:
        # Tags are fetched separately per table; tags_match allows the
        # table to carry extra tags beyond the requested ones.
        this_tags = get_resource_tags(vpc_conn, table.id)
        if tags_match(tags, this_tags):
            route_table = table
            count += 1

    if count > 1:
        raise RuntimeError("Tags provided do not identify a unique route table")
    else:
        return route_table
+
+
def route_spec_matches_route(route_spec, route):
    """Return True when the user-supplied route_spec describes `route`.

    Only the keys present in route_spec are compared against the
    corresponding attributes of the boto route object.
    """
    key_attr_map = {
        'destination_cidr_block': 'destination_cidr_block',
        'gateway_id': 'gateway_id',
        'instance_id': 'instance_id',
        'interface_id': 'interface_id',
        'vpc_peering_connection_id': 'vpc_peering_connection_id',
    }

    # This is a workaround to catch managed NAT gateways as they do not show
    # up in any of the returned values when describing route tables.
    # The caveat of doing it this way is that if there was an existing
    # route for another nat gateway in this route table there is not a way to
    # change to another nat gateway id. Long term solution would be to utilise
    # boto3 which is a very big task for this module or to update boto.
    if route_spec.get('gateway_id') and 'nat-' in route_spec['gateway_id']:
        # A route whose destination matches but that reports no target at
        # all is assumed to be the (invisible) NAT gateway route.
        if route.destination_cidr_block == route_spec['destination_cidr_block']:
            if all((not route.gateway_id, not route.instance_id, not route.interface_id, not route.vpc_peering_connection_id)):
                return True

    for k in key_attr_map:
        if k in route_spec:
            if route_spec[k] != getattr(route, k):
                return False
    return True
+
+
def rename_key(d, old_key, new_key):
    """Move the value stored under old_key to new_key (in place)."""
    d[new_key] = d.pop(old_key)
+
+
def index_of_matching_route(route_spec, routes_to_match):
    """Return the index of the first route matching route_spec, or None."""
    return next((idx for idx, candidate in enumerate(routes_to_match)
                 if route_spec_matches_route(route_spec, candidate)), None)
+
+
def ensure_routes(vpc_conn, route_table, route_specs, propagating_vgw_ids,
                  check_mode):
    """Converge the routes of route_table to exactly match route_specs.

    Routes on the table that are not in route_specs are deleted, except
    the implicit 'local' route, VPC-endpoint routes ('vpce-...') and
    routes through a propagating VGW.  Routes in route_specs missing from
    the table are created.  Returns {'changed': bool}.
    """
    routes_to_match = list(route_table.routes)
    route_specs_to_create = []
    for route_spec in route_specs:
        i = index_of_matching_route(route_spec, routes_to_match)
        if i is None:
            route_specs_to_create.append(route_spec)
        else:
            # Matched specs are removed; leftovers are deletion candidates.
            del routes_to_match[i]

    # NOTE: As of boto==2.38.0, the origin of a route is not available
    # (for example, whether it came from a gateway with route propagation
    # enabled). Testing for origin == 'EnableVgwRoutePropagation' is more
    # correct than checking whether the route uses a propagating VGW.
    # The current logic will leave non-propagated routes using propagating
    # VGWs in place.
    routes_to_delete = []
    for r in routes_to_match:
        if r.gateway_id:
            if r.gateway_id != 'local' and not r.gateway_id.startswith('vpce-'):
                if not propagating_vgw_ids or r.gateway_id not in propagating_vgw_ids:
                    routes_to_delete.append(r)
        else:
            routes_to_delete.append(r)

    changed = bool(routes_to_delete or route_specs_to_create)
    if changed:
        for route in routes_to_delete:
            try:
                vpc_conn.delete_route(route_table.id,
                                      route.destination_cidr_block,
                                      dry_run=check_mode)
            except EC2ResponseError as e:
                # Only the expected check-mode rejection is ignored; the
                # original code silently swallowed *every* EC2 error here.
                if e.error_code == 'DryRunOperation':
                    pass
                else:
                    raise

        for route_spec in route_specs_to_create:
            try:
                vpc_conn.create_route(route_table.id,
                                      dry_run=check_mode,
                                      **route_spec)
            except EC2ResponseError as e:
                if e.error_code == 'DryRunOperation':
                    pass
                else:
                    raise

    return {'changed': bool(changed)}
+
+
def ensure_subnet_association(vpc_conn, vpc_id, route_table_id, subnet_id,
                              check_mode):
    """Associate subnet_id with route_table_id, replacing any existing
    association with a different route table.

    Returns {'changed': bool, 'association_id': str}; in check mode a
    pending change returns only {'changed': True} (no association id).
    """
    route_tables = vpc_conn.get_all_route_tables(
        filters={'association.subnet_id': subnet_id, 'vpc_id': vpc_id}
    )
    for route_table in route_tables:
        if route_table.id is None:
            continue
        for a in route_table.associations:
            if a.subnet_id == subnet_id:
                if route_table.id == route_table_id:
                    # Already associated with the desired table.
                    return {'changed': False, 'association_id': a.id}
                else:
                    if check_mode:
                        return {'changed': True}
                    # Detach from the old table before re-associating below.
                    vpc_conn.disassociate_route_table(a.id)

    association_id = vpc_conn.associate_route_table(route_table_id, subnet_id)
    return {'changed': True, 'association_id': association_id}
+
+
def ensure_subnet_associations(vpc_conn, vpc_id, route_table, subnets,
                               check_mode):
    """Make route_table's subnet associations exactly the given subnets.

    Each requested subnet is (re-)associated, then any association the
    table had that was not re-confirmed is removed.  Returns
    {'changed': bool}.
    """
    current_association_ids = [a.id for a in route_table.associations]
    new_association_ids = []
    changed = False
    for subnet in subnets:
        result = ensure_subnet_association(
            vpc_conn, vpc_id, route_table.id, subnet.id, check_mode)
        changed = changed or result['changed']
        if changed and check_mode:
            # In check mode a pending change carries no 'association_id',
            # so stop before trying to read it below.
            return {'changed': True}
        new_association_ids.append(result['association_id'])

    # Associations that existed before but were not requested this run.
    to_delete = [a_id for a_id in current_association_ids
                 if a_id not in new_association_ids]

    for a_id in to_delete:
        changed = True
        vpc_conn.disassociate_route_table(a_id, dry_run=check_mode)

    return {'changed': changed}
+
+
def ensure_propagation(vpc_conn, route_table, propagating_vgw_ids,
                       check_mode):
    """Enable VGW route propagation on route_table for each gateway in
    propagating_vgw_ids.  Returns {'changed': bool}.
    """

    # NOTE: As of boto==2.38.0, it is not yet possible to query the existing
    # propagating gateways. However, EC2 does support this as shown in its API
    # documentation. For now, a reasonable proxy for this is the presence of
    # propagated routes using the gateway in the route table. If such a route
    # is found, propagation is almost certainly enabled.
    changed = False
    for vgw_id in propagating_vgw_ids:
        for r in list(route_table.routes):
            if r.gateway_id == vgw_id:
                # NOTE(review): this returns immediately and skips any
                # remaining VGW ids in the list; presumably fine for the
                # common single-VGW case -- confirm.
                return {'changed': False}

        changed = True
        vpc_conn.enable_vgw_route_propagation(route_table.id,
                                              vgw_id,
                                              dry_run=check_mode)

    return {'changed': changed}
+
+
def ensure_route_table_absent(connection, module):
    """Delete the route table identified by the module's lookup params.

    Looks the table up by tags or by id, returns {'changed': False} when
    nothing matches, otherwise deletes it and returns {'changed': True}.
    AWS errors abort the module via fail_json.
    """

    lookup = module.params.get('lookup')
    route_table_id = module.params.get('route_table_id')
    tags = module.params.get('tags')
    vpc_id = module.params.get('vpc_id')

    if lookup == 'tag':
        if tags is not None:
            try:
                route_table = get_route_table_by_tags(connection, vpc_id, tags)
            except EC2ResponseError as e:
                module.fail_json(msg=e.message)
            except RuntimeError as e:
                # Raised when the tags match more than one route table.
                module.fail_json(msg=e.args[0])
        else:
            route_table = None
    elif lookup == 'id':
        try:
            route_table = get_route_table_by_id(connection, vpc_id, route_table_id)
        except EC2ResponseError as e:
            module.fail_json(msg=e.message)

    if route_table is None:
        # Nothing to delete.
        return {'changed': False}

    try:
        connection.delete_route_table(route_table.id, dry_run=module.check_mode)
    except EC2ResponseError as e:
        if e.error_code == 'DryRunOperation':
            # Expected rejection in check mode; report a pending change.
            pass
        else:
            module.fail_json(msg=e.message)

    return {'changed': True}
+
+
def get_route_table_info(route_table):
    """Serialise a boto RouteTable object into a plain dict for output."""
    return {
        'id': route_table.id,
        'routes': [route.__dict__ for route in route_table.routes],
        'tags': route_table.tags,
        'vpc_id': route_table.vpc_id,
    }
+
+
def create_route_spec(connection, module, vpc_id):
    """Normalise the module's 'routes' parameter into boto keyword specs.

    Renames each route's 'dest' key to 'destination_cidr_block'
    (mutating the dicts stored in module.params in place) and resolves
    the literal gateway value 'igw' to the VPC's actual internet gateway
    id via find_igw (which may raise AnsibleIgwSearchException).
    """
    routes = module.params.get('routes')

    for route_spec in routes:
        rename_key(route_spec, 'dest', 'destination_cidr_block')

        if route_spec.get('gateway_id') and route_spec['gateway_id'].lower() == 'igw':
            igw = find_igw(connection, vpc_id)
            route_spec['gateway_id'] = igw

    return routes
+
+
def ensure_route_table_present(connection, module):
    """Create or update a route table so it matches the module params.

    Finds the table by tag or by id (creating it when absent), then
    converges its routes, VGW propagation, tags and subnet associations.
    Exits the module via exit_json with the resulting table description;
    does not return normally.
    """
    lookup = module.params.get('lookup')
    propagating_vgw_ids = module.params.get('propagating_vgw_ids')
    route_table_id = module.params.get('route_table_id')
    subnets = module.params.get('subnets')
    tags = module.params.get('tags')
    vpc_id = module.params.get('vpc_id')
    try:
        routes = create_route_spec(connection, module, vpc_id)
    except AnsibleIgwSearchException as e:
        # str(e) is portable; the original e[0] relied on Python 2's
        # indexable exceptions and raises TypeError under Python 3.
        module.fail_json(msg=str(e))

    changed = False
    tags_valid = False

    if lookup == 'tag':
        if tags is not None:
            try:
                route_table = get_route_table_by_tags(connection, vpc_id, tags)
            except EC2ResponseError as e:
                module.fail_json(msg=e.message)
            except RuntimeError as e:
                # Raised when the tags match more than one route table.
                module.fail_json(msg=e.args[0])
        else:
            # No tags given: skip the lookup and always create a table.
            route_table = None
    elif lookup == 'id':
        try:
            route_table = get_route_table_by_id(connection, vpc_id, route_table_id)
        except EC2ResponseError as e:
            module.fail_json(msg=e.message)

    # If no route table returned then create new route table
    if route_table is None:
        try:
            # The second positional argument is boto's dry_run flag.
            route_table = connection.create_route_table(vpc_id, module.check_mode)
            changed = True
        except EC2ResponseError as e:
            if e.error_code == 'DryRunOperation':
                # Check mode: report the pending creation and stop.
                module.exit_json(changed=True)

            module.fail_json(msg=e.message)

    if routes is not None:
        try:
            result = ensure_routes(connection, route_table, routes, propagating_vgw_ids, module.check_mode)
            changed = changed or result['changed']
        except EC2ResponseError as e:
            module.fail_json(msg=e.message)

    if propagating_vgw_ids is not None:
        result = ensure_propagation(connection, route_table,
                                    propagating_vgw_ids,
                                    check_mode=module.check_mode)
        changed = changed or result['changed']

    if not tags_valid and tags is not None:
        result = ensure_tags(connection, route_table.id, tags,
                             add_only=True, check_mode=module.check_mode)
        changed = changed or result['changed']

    if subnets:
        associated_subnets = []
        try:
            associated_subnets = find_subnets(connection, vpc_id, subnets)
        except EC2ResponseError as e:
            raise AnsibleRouteTableException(
                'Unable to find subnets for route table {0}, error: {1}'
                .format(route_table, e)
            )

        try:
            result = ensure_subnet_associations(connection, vpc_id, route_table, associated_subnets, module.check_mode)
            changed = changed or result['changed']
        except EC2ResponseError as e:
            raise AnsibleRouteTableException(
                'Unable to associate subnets for route table {0}, error: {1}'
                .format(route_table, e)
            )

    module.exit_json(changed=changed, route_table=get_route_table_info(route_table))
+
+
def main():
    """Module entry point: parse arguments, connect and converge state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            lookup = dict(default='tag', required=False, choices=['tag', 'id']),
            propagating_vgw_ids = dict(default=None, required=False, type='list'),
            route_table_id = dict(default=None, required=False),
            routes = dict(default=[], required=False, type='list'),
            state = dict(default='present', choices=['present', 'absent']),
            subnets = dict(default=None, required=False, type='list'),
            tags = dict(default=None, required=False, type='dict', aliases=['resource_tags']),
            vpc_id = dict(default=None, required=True)
        )
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        # boto needs an explicit region to build the endpoint.
        module.fail_json(msg="region must be specified")

    lookup = module.params.get('lookup')
    route_table_id = module.params.get('route_table_id')
    state = module.params.get('state', 'present')

    if lookup == 'id' and route_table_id is None:
        # fail_json only accepts keyword arguments; the original passed the
        # message positionally, which raised a TypeError instead of failing
        # cleanly with this message.
        module.fail_json(msg="You must specify route_table_id if lookup is set to id")

    try:
        if state == 'present':
            # ensure_route_table_present exits the module itself on success.
            result = ensure_route_table_present(connection, module)
        elif state == 'absent':
            result = ensure_route_table_absent(connection, module)
    except AnsibleRouteTableException as e:
        module.fail_json(msg=str(e))

    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_route_table_facts.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_route_table_facts.py
new file mode 100644
index 0000000000..f270f2cbb2
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_route_table_facts.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_route_table_facts
+short_description: Gather facts about ec2 VPC route tables in AWS
+description:
+ - Gather facts about ec2 VPC route tables in AWS
+version_added: "2.0"
+author: "Rob White (@wimnat)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
+ required: false
+ default: null
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all VPC route tables
+- ec2_vpc_route_table_facts:
+
+# Gather facts about a particular VPC route table using route table ID
+- ec2_vpc_route_table_facts:
+ filters:
+ route-table-id: rtb-00112233
+
+# Gather facts about any VPC route table with a tag key Name and value Example
+- ec2_vpc_route_table_facts:
+ filters:
+ "tag:Name": Example
+
+# Gather facts about any VPC route table within VPC with ID vpc-abcdef00
+- ec2_vpc_route_table_facts:
+ filters:
+ vpc-id: vpc-abcdef00
+
+'''
+
+try:
+ import boto.vpc
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
+
+
def get_route_table_info(route_table):
    """Serialise a boto RouteTable object into a plain dict for output."""
    routes = [route.__dict__ for route in route_table.routes]
    return {'id': route_table.id,
            'routes': routes,
            'tags': route_table.tags,
            'vpc_id': route_table.vpc_id}
+
def list_ec2_vpc_route_tables(connection, module):
    """Describe all route tables matching the module's 'filters' param
    and exit the module with the serialised results.
    """

    filters = module.params.get("filters")
    route_table_dict_array = []

    try:
        all_route_tables = connection.get_all_route_tables(filters=filters)
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    for route_table in all_route_tables:
        route_table_dict_array.append(get_route_table_info(route_table))

    module.exit_json(route_tables=route_table_dict_array)
+
+
def main():
    """Module entry point: parse arguments, connect to EC2, list tables."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            # Raw DescribeRouteTables filters, passed straight to boto.
            filters = dict(default=None, type='dict')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        # boto needs an explicit region to build the endpoint.
        module.fail_json(msg="region must be specified")

    list_ec2_vpc_route_tables(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_subnet.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_subnet.py
new file mode 100644
index 0000000000..dc66d44586
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_subnet.py
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_subnet
+short_description: Manage subnets in AWS virtual private clouds
+description:
+ - Manage subnets in AWS virtual private clouds
+version_added: "2.0"
+author: Robert Estelle (@erydo)
+options:
+ az:
+ description:
+ - "The availability zone for the subnet. Only required when state=present."
+ required: false
+ default: null
+ cidr:
+ description:
+ - "The CIDR block for the subnet. E.g. 192.0.2.0/24. Only required when state=present."
+ required: false
+ default: null
+ tags:
+ description:
+ - "A dict of tags to apply to the subnet. Any tags currently applied to the subnet and not present here will be removed."
+ required: false
+ default: null
+ aliases: [ 'resource_tags' ]
+ state:
+ description:
+ - "Create or remove the subnet"
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ vpc_id:
+ description:
+ - "VPC ID of the VPC in which to create the subnet."
+ required: false
+ default: null
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Create subnet for database servers
+ ec2_vpc_subnet:
+ state: present
+ vpc_id: vpc-123456
+ cidr: 10.0.1.16/28
+ resource_tags:
+ Name: Database Subnet
+ register: database_subnet
+
+- name: Remove subnet for database servers
+ ec2_vpc_subnet:
+ state: absent
+ vpc_id: vpc-123456
+ cidr: 10.0.1.16/28
+
+'''
+
+import time
+
+try:
+ import boto.ec2
+ import boto.vpc
+ from boto.exception import EC2ResponseError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+ if __name__ != '__main__':
+ raise
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
+
+
class AnsibleVPCSubnetException(Exception):
    # Base class for this module's errors; main() catches this and
    # converts it into module.fail_json().
    pass


class AnsibleVPCSubnetCreationException(AnsibleVPCSubnetException):
    # Raised when AWS rejects a create_subnet call.
    pass


class AnsibleVPCSubnetDeletionException(AnsibleVPCSubnetException):
    # Raised when AWS rejects a delete_subnet call.
    pass


class AnsibleTagCreationException(AnsibleVPCSubnetException):
    # Raised when tags cannot be created/updated on the subnet.
    pass
+
+
def get_subnet_info(subnet):
    """Serialise a boto Subnet object into a plain dict for module output."""
    return {
        'id': subnet.id,
        'availability_zone': subnet.availability_zone,
        'available_ip_address_count': subnet.available_ip_address_count,
        'cidr_block': subnet.cidr_block,
        'default_for_az': subnet.defaultForAz,
        'map_public_ip_on_launch': subnet.mapPublicIpOnLaunch,
        'state': subnet.state,
        'tags': subnet.tags,
        'vpc_id': subnet.vpc_id,
    }
+
def subnet_exists(vpc_conn, subnet_id):
    """Return the subnet object when subnet_id exists and its state is
    'available'; otherwise return False (callers poll on this).
    """
    candidates = vpc_conn.get_all_subnets(filters={'subnet-id': subnet_id})
    if candidates and candidates[0].state == "available":
        return candidates[0]
    return False
+
+
def create_subnet(vpc_conn, vpc_id, cidr, az, check_mode):
    """Create a subnet and wait until it reports state 'available'.

    Returns the subnet object, or None in check mode (the dry-run call
    raises DryRunOperation before anything is created).  Any other AWS
    error raises AnsibleVPCSubnetCreationException.
    """
    try:
        new_subnet = vpc_conn.create_subnet(vpc_id, cidr, az, dry_run=check_mode)
        # Sometimes AWS takes its time to create a subnet and so using
        # new subnets's id to do things like create tags results in
        # exception. boto doesn't seem to refresh 'state' of the newly
        # created subnet, i.e.: it's always 'pending'.
        subnet = False
        while subnet is False:
            # NOTE(review): no timeout -- if the subnet never reaches
            # 'available' this polls forever.
            subnet = subnet_exists(vpc_conn, new_subnet.id)
            time.sleep(0.1)
    except EC2ResponseError as e:
        if e.error_code == "DryRunOperation":
            subnet = None
        else:
            raise AnsibleVPCSubnetCreationException(
                'Unable to create subnet {0}, error: {1}'.format(cidr, e))

    return subnet
+
+
def get_resource_tags(vpc_conn, resource_id):
    """Fetch the tags on resource_id as a plain {name: value} dict."""
    all_tags = vpc_conn.get_all_tags(filters={'resource-id': resource_id})
    return dict([(t.name, t.value) for t in all_tags])
+
+
def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode):
    """Make the tags on resource_id match `tags`.

    When add_only is True, existing tags absent from `tags` are kept;
    otherwise they are deleted.  Unlike a pure add, keys whose value
    differs are re-created with the new value.  Returns
    {'changed': bool, 'tags': dict}; raises AnsibleTagCreationException
    on any EC2 error.
    """
    try:
        cur_tags = get_resource_tags(vpc_conn, resource_id)
        if cur_tags == tags:
            return {'changed': False, 'tags': cur_tags}

        # Tags on the resource that were not requested.
        to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
        if to_delete and not add_only:
            vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode)

        # New keys plus keys whose value changed.
        to_add = dict((k, tags[k]) for k in tags if k not in cur_tags or cur_tags[k] != tags[k])
        if to_add:
            vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode)

        # Re-read so the caller sees the tags actually on the resource.
        latest_tags = get_resource_tags(vpc_conn, resource_id)
        return {'changed': True, 'tags': latest_tags}
    except EC2ResponseError as e:
        raise AnsibleTagCreationException(
            'Unable to update tags for {0}, error: {1}'.format(resource_id, e))
+
+
def get_matching_subnet(vpc_conn, vpc_id, cidr):
    """Return the subnet in vpc_id whose CIDR block equals cidr, or None."""
    for subnet in vpc_conn.get_all_subnets(filters={'vpc_id': vpc_id}):
        if subnet.cidr_block == cidr:
            return subnet
    return None
+
+
def ensure_subnet_present(vpc_conn, vpc_id, cidr, az, tags, check_mode):
    """Ensure a subnet with the given CIDR exists in vpc_id, creating it
    and converging its tags when necessary.

    Returns {'changed': bool, 'subnet': dict}; 'subnet' is {} in check
    mode when the subnet does not exist yet.
    """
    subnet = get_matching_subnet(vpc_conn, vpc_id, cidr)
    changed = False
    if subnet is None:
        subnet = create_subnet(vpc_conn, vpc_id, cidr, az, check_mode)
        changed = True
        # Subnet will be None when check_mode is true
        if subnet is None:
            return {
                'changed': changed,
                'subnet': {}
            }

    if tags != subnet.tags:
        # Full replace (add_only=False): extra tags are removed.
        ensure_tags(vpc_conn, subnet.id, tags, False, check_mode)
        subnet.tags = tags
        changed = True

    subnet_info = get_subnet_info(subnet)

    return {
        'changed': changed,
        'subnet': subnet_info
    }
+
+
def ensure_subnet_absent(vpc_conn, vpc_id, cidr, check_mode):
    """Delete the subnet in vpc_id matching cidr, if one exists.

    Returns {'changed': bool}; raises AnsibleVPCSubnetDeletionException
    when AWS rejects the delete.
    """
    subnet = get_matching_subnet(vpc_conn, vpc_id, cidr)
    if subnet is None:
        return {'changed': False}

    try:
        vpc_conn.delete_subnet(subnet.id, dry_run=check_mode)
        return {'changed': True}
    except EC2ResponseError as e:
        raise AnsibleVPCSubnetDeletionException(
            'Unable to delete subnet {0}, error: {1}'
            .format(subnet.cidr_block, e))
+
+
def main():
    """Module entry point: parse arguments, connect and converge state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            az = dict(default=None, required=False),
            cidr = dict(default=None, required=True),
            state = dict(default='present', choices=['present', 'absent']),
            tags = dict(default=None, required=False, type='dict', aliases=['resource_tags']),
            vpc_id = dict(default=None, required=True)
        )
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        # boto needs an explicit region to build the endpoint.
        module.fail_json(msg="region must be specified")

    vpc_id = module.params.get('vpc_id')
    tags = module.params.get('tags')
    cidr = module.params.get('cidr')
    az = module.params.get('az')
    state = module.params.get('state')

    try:
        if state == 'present':
            result = ensure_subnet_present(connection, vpc_id, cidr, az, tags,
                                           check_mode=module.check_mode)
        elif state == 'absent':
            result = ensure_subnet_absent(connection, vpc_id, cidr,
                                          check_mode=module.check_mode)
    except AnsibleVPCSubnetException as e:
        module.fail_json(msg=str(e))

    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_facts.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_facts.py
new file mode 100644
index 0000000000..f881833468
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_facts.py
@@ -0,0 +1,147 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_vpc_subnet_facts
+short_description: Gather facts about ec2 VPC subnets in AWS
+description:
+ - Gather facts about ec2 VPC subnets in AWS
+version_added: "2.1"
+author: "Rob White (@wimnat)"
+options:
+ filters:
+ description:
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters.
+ required: false
+ default: null
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather facts about all VPC subnets
+- ec2_vpc_subnet_facts:
+
+# Gather facts about a particular VPC subnet using ID
+- ec2_vpc_subnet_facts:
+ filters:
+ subnet-id: subnet-00112233
+
+# Gather facts about any VPC subnet with a tag key Name and value Example
+- ec2_vpc_subnet_facts:
+ filters:
+ "tag:Name": Example
+
+# Gather facts about any VPC subnet within VPC with ID vpc-abcdef00
+- ec2_vpc_subnet_facts:
+ filters:
+ vpc-id: vpc-abcdef00
+
+# Gather facts about a set of VPC subnets, publicA, publicB and publicC within a
+# VPC with ID vpc-abcdef00 and then use the jinja map function to return the
+# subnet_ids as a list.
+
+- ec2_vpc_subnet_facts:
+ filters:
+ vpc-id: vpc-abcdef00
+ "tag:Name": "{{ item }}"
+ with_items:
+ - publicA
+ - publicB
+ - publicC
+ register: subnet_facts
+
+- set_fact:
+ subnet_ids: "{{ subnet_facts.results|map(attribute='subnets.0.id')|list }}"
+'''
+
+try:
+ import boto.vpc
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
+
+
def get_subnet_info(subnet):
    """Map a boto Subnet object onto a plain dict of subnet facts."""
    return {
        'id': subnet.id,
        'availability_zone': subnet.availability_zone,
        'available_ip_address_count': subnet.available_ip_address_count,
        'cidr_block': subnet.cidr_block,
        # boto exposes these two with camelCase attribute names.
        'default_for_az': subnet.defaultForAz,
        'map_public_ip_on_launch': subnet.mapPublicIpOnLaunch,
        'state': subnet.state,
        'tags': subnet.tags,
        'vpc_id': subnet.vpc_id,
    }
+
def list_ec2_vpc_subnets(connection, module):
    """Describe subnets matching the 'filters' module param and exit the module.

    Calls module.exit_json(subnets=[...]) on success, or module.fail_json on a
    boto server-side error.  Does not return to the caller.
    """
    filters = module.params.get("filters")

    try:
        all_subnets = connection.get_all_subnets(filters=filters)
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    # Build one fact dict per subnet; a comprehension replaces the original
    # manual append loop (same order, same content).
    module.exit_json(subnets=[get_subnet_info(subnet) for subnet in all_subnets])
+
+
def main():
    """Module entry point: gather VPC subnet facts and exit."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        filters=dict(default=None, type='dict')
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="region must be specified")

    try:
        connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    list_ec2_vpc_subnets(connection, module)


if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py
new file mode 100644
index 0000000000..40eb386156
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw.py
@@ -0,0 +1,602 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: ec2_vpc_vgw
+short_description: Create and delete AWS VPN Virtual Gateways.
+description:
+ - Creates AWS VPN Virtual Gateways
+ - Deletes AWS VPN Virtual Gateways
+ - Attaches Virtual Gateways to VPCs
+ - Detaches Virtual Gateways from VPCs
+version_added: "2.2"
+requirements: [ boto3 ]
+options:
+ state:
+ description:
+ - present to ensure resource is created.
+ - absent to remove resource
+ required: false
+ default: present
+ choices: [ "present", "absent"]
+ name:
+ description:
+ - name of the vgw to be created or deleted
+ required: false
+ type:
+ description:
+ - type of the virtual gateway to be created
+ required: false
+ choices: [ "ipsec.1" ]
+ vpn_gateway_id:
+ description:
+ - vpn gateway id of an existing virtual gateway
+ required: false
+ vpc_id:
+ description:
+ - the vpc-id of a vpc to attach or detach
+ required: false
+ wait_timeout:
+ description:
+ - number of seconds to wait for status during vpc attach and detach
+ required: false
+ default: 320
+ tags:
+ description:
+ - dictionary of resource tags
+ required: false
+ default: null
+ aliases: [ "resource_tags" ]
+author: Nick Aslanidis (@naslanidis)
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+- name: Create a new vgw attached to a specific VPC
+ ec2_vpc_vgw:
+ state: present
+ region: ap-southeast-2
+ profile: personal
+ vpc_id: vpc-12345678
+ name: personal-testing
+ type: ipsec.1
+ register: created_vgw
+
+- name: Create a new unattached vgw
+ ec2_vpc_vgw:
+ state: present
+ region: ap-southeast-2
+ profile: personal
+ name: personal-testing
+ type: ipsec.1
+ tags:
+ environment: production
+ owner: ABC
+ register: created_vgw
+
+- name: Remove a new vgw using the name
+ ec2_vpc_vgw:
+ state: absent
+ region: ap-southeast-2
+ profile: personal
+ name: personal-testing
+ type: ipsec.1
+ register: deleted_vgw
+
+- name: Remove a new vgw using the vpn_gateway_id
+ ec2_vpc_vgw:
+ state: absent
+ region: ap-southeast-2
+ profile: personal
+ vpn_gateway_id: vgw-3a9aa123
+ register: deleted_vgw
+'''
+
+RETURN = '''
+result:
+ description: The result of the create, or delete action.
+ returned: success
+ type: dictionary
+'''
+
+try:
+ import json
+ import time
+ import botocore
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
def get_vgw_info(vgws):
    """Normalize a describe_vpn_gateways result list into a flat facts dict.

    Note: although *vgws* is a list, only the info for the LAST gateway in
    the list is returned (preserved original behaviour; callers pass a
    single-element list).  Returns None for a non-list input, and now also
    for an empty list (the original raised UnboundLocalError on []).
    """
    if not isinstance(vgws, list):
        return None

    vgw_info = None  # stays None when vgws is empty
    for vgw in vgws:
        vgw_info = {
            'id': vgw['VpnGatewayId'],
            'type': vgw['Type'],
            'state': vgw['State'],
            'vpc_id': None,
            'tags': dict((tag['Key'], tag['Value']) for tag in vgw['Tags']),
        }

        # Only report a vpc_id for an actually-attached VPC.
        attachments = vgw['VpcAttachments']
        if len(attachments) != 0 and attachments[0]['State'] == 'attached':
            vgw_info['vpc_id'] = attachments[0]['VpcId']

    return vgw_info
+
def wait_for_status(client, module, vpn_gateway_id, status):
    """Poll find_vgw until the first VPC attachment reaches *status*.

    Polls every 15 seconds for up to the 'wait_timeout' module param.
    Returns (status_achieved, last_describe_result).
    """
    polling_increment_secs = 15
    # int() is required: on Python 3 plain '/' yields a float and
    # range(float) raises TypeError.
    max_retries = int(module.params.get('wait_timeout') / polling_increment_secs)
    status_achieved = False

    for x in range(0, max_retries):
        try:
            response = find_vgw(client, module, vpn_gateway_id)
            if response[0]['VpcAttachments'][0]['State'] == status:
                status_achieved = True
                break
            else:
                time.sleep(polling_increment_secs)
        except botocore.exceptions.ClientError:
            e = get_exception()
            module.fail_json(msg=str(e))

    # NOTE(review): if max_retries is 0 'response' is unbound here, as in the
    # original; wait_timeout defaults to 320 so this is not hit in practice.
    result = response
    return status_achieved, result
+
+
def attach_vgw(client, module, vpn_gateway_id):
    """Attach the gateway to the module's vpc_id and wait for 'attached'."""
    vpc_id = module.params.get('vpc_id')

    try:
        response = client.attach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=vpc_id)
    except botocore.exceptions.ClientError:
        e = get_exception()
        module.fail_json(msg=str(e))

    status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'attached')
    if not status_achieved:
        module.fail_json(msg='Error waiting for vpc to attach to vgw - please check the AWS console')

    return response
+
+
def detach_vgw(client, module, vpn_gateway_id, vpc_id=None):
    """Detach a VPC from the gateway and wait for 'detached'.

    Uses the explicit *vpc_id* when given, otherwise falls back to the
    module's 'vpc_id' parameter.
    """
    target_vpc = vpc_id if vpc_id else module.params.get('vpc_id')

    try:
        response = client.detach_vpn_gateway(VpnGatewayId=vpn_gateway_id, VpcId=target_vpc)
    except botocore.exceptions.ClientError:
        e = get_exception()
        module.fail_json(msg=str(e))

    status_achieved, vgw = wait_for_status(client, module, [vpn_gateway_id], 'detached')
    if not status_achieved:
        module.fail_json(msg='Error waiting for vpc to detach from vgw - please check the AWS console')

    return response
+
+
def create_vgw(client, module):
    """Create a virtual gateway of the configured 'type'; return the API response."""
    gateway_type = module.params.get('type')

    try:
        response = client.create_vpn_gateway(Type=gateway_type)
    except botocore.exceptions.ClientError:
        e = get_exception()
        module.fail_json(msg=str(e))

    return response
+
+
def delete_vgw(client, module, vpn_gateway_id):
    """Delete the virtual gateway and return its id.

    The API response does not echo the id back, so we return the id the
    caller supplied.
    """
    try:
        client.delete_vpn_gateway(VpnGatewayId=vpn_gateway_id)
    except botocore.exceptions.ClientError:
        e = get_exception()
        module.fail_json(msg=str(e))

    return vpn_gateway_id
+
+
def create_tags(client, module, vpn_gateway_id):
    """Apply the module's tag set (always including the Name tag) to the gateway."""
    try:
        response = client.create_tags(Resources=[vpn_gateway_id], Tags=load_tags(module))
    except botocore.exceptions.ClientError:
        e = get_exception()
        module.fail_json(msg=str(e))

    return response
+
+
def delete_tags(client, module, vpn_gateway_id, tags_to_delete=None):
    """Delete specific tags (or, with no list given, all tags) from the gateway."""
    kwargs = {'Resources': [vpn_gateway_id]}
    if tags_to_delete:
        kwargs['Tags'] = tags_to_delete

    try:
        response = client.delete_tags(**kwargs)
    except botocore.exceptions.ClientError:
        e = get_exception()
        module.fail_json(msg=str(e))

    return response
+
+
def load_tags(module):
    """Build the boto3 Tags list from the module params.

    User-supplied tags come first with values coerced to str; a Name tag
    carrying the 'name' param is always appended.
    """
    tags = []
    if module.params.get('tags'):
        # .items() works on both Python 2 and 3; the original .iteritems()
        # is Python-2-only and raises AttributeError on Python 3.
        for name, value in module.params.get('tags').items():
            tags.append({'Key': name, 'Value': str(value)})
    # Both branches of the original appended the Name tag; hoisted here.
    tags.append({'Key': "Name", 'Value': module.params.get('name')})
    return tags
+
+
def find_tags(client, module, resource_id=None):
    """Describe the tags on *resource_id*; returns None when no id is given.

    (The original fell through to an UnboundLocalError on 'response' when
    resource_id was None/empty.)
    """
    if not resource_id:
        return None

    try:
        response = client.describe_tags(Filters=[
            {'Name': 'resource-id', 'Values': [resource_id]}
        ])
    except botocore.exceptions.ClientError:
        e = get_exception()
        module.fail_json(msg=str(e))

    return response
+
+
def check_tags(client, module, existing_vgw, vpn_gateway_id):
    """Reconcile the gateway's tags with the 'tags' module param.

    Returns (vgw, changed): *vgw* is a refreshed describe result when tags
    were modified, otherwise the caller-supplied existing_vgw.
    """
    desired_tags = module.params.get('tags')
    vgw = existing_vgw
    changed = False

    # Current tags keyed for comparison, excluding the reserved Name tag.
    current_tags = {}
    for tag in existing_vgw[0]['Tags']:
        if tag['Key'] != 'Name':
            current_tags[tag['Key']] = tag['Value']

    # If the existing tags don't match the tags arg, delete everything and
    # recreate from the new list.  ('is not None' replaces '!= None' - PEP 8.)
    if desired_tags is not None and current_tags != desired_tags:
        delete_tags(client, module, vpn_gateway_id)
        create_tags(client, module, vpn_gateway_id)
        vgw = find_vgw(client, module)
        changed = True

    # If no tags arg was supplied, strip every existing tag except Name.
    if desired_tags is None and current_tags != {}:
        tags_to_delete = [tag for tag in existing_vgw[0]['Tags'] if tag['Key'] != 'Name']
        delete_tags(client, module, vpn_gateway_id, tags_to_delete)
        vgw = find_vgw(client, module)
        changed = True

    return vgw, changed
+
+
def find_vpc(client, module):
    """Describe the VPC named by the 'vpc_id' module param.

    Returns None when 'vpc_id' is not set (the original raised
    UnboundLocalError in that case); fail_json()s on a client error, e.g.
    when the VPC does not exist.
    """
    vpc_id = module.params.get('vpc_id')
    if not vpc_id:
        return None

    try:
        response = client.describe_vpcs(VpcIds=[vpc_id])
    except botocore.exceptions.ClientError:
        e = get_exception()
        module.fail_json(msg=str(e))

    return response
+
+
def find_vgw(client, module, vpn_gateway_id=None):
    """Locate virtual gateways and return the (possibly empty) VpnGateways list.

    Explicit ids are only honoured when state is not 'present'; otherwise
    (and when no ids were given) lookup is by gateway type plus Name tag.
    """
    name = module.params.get('name')
    gateway_type = module.params.get('type')
    state = module.params.get('state')

    try:
        if state != 'present' and vpn_gateway_id:
            response = client.describe_vpn_gateways(VpnGatewayIds=vpn_gateway_id)
        else:
            # Both the 'present' path and the id-less 'absent' path of the
            # original issued this identical filtered describe.
            response = client.describe_vpn_gateways(Filters=[
                {'Name': 'type', 'Values': [gateway_type]},
                {'Name': 'tag:Name', 'Values': [name]}
            ])
    except botocore.exceptions.ClientError:
        e = get_exception()
        module.fail_json(msg=str(e))

    return response['VpnGateways']
+
+
def ensure_vgw_present(client, module):
    """Ensure a virtual gateway matching the module args exists.

    If an existing vgw's type and Name tag match the args it is reused (no
    second vgw is created) and its tags/VPC attachment are reconciled;
    otherwise a new vgw is created and tagged.  When 'vpc_id' is supplied
    the gateway ends up attached to that VPC; when it is not, any current
    attachment is removed.  Returns (changed, facts dict from get_vgw_info).
    """

    changed = False
    params = dict()
    result = dict()
    params['Name'] = module.params.get('name')
    params['VpcId'] = module.params.get('vpc_id')
    params['Type'] = module.params.get('type')
    params['Tags'] = module.params.get('tags')
    params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')

    # Check that a name argument has been supplied.
    # TODO(review): 'suppled' typo in the message below - left as-is here
    # since doc-only changes must not alter runtime strings.
    if not module.params.get('name'):
        module.fail_json(msg='A name is required when a status of \'present\' is suppled')

    # check if a gateway matching our module args already exists
    existing_vgw = find_vgw(client, module)

    if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
        vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
        vgw, changed = check_tags(client, module, existing_vgw, vpn_gateway_id)

        # if a vpc_id was provided, check if it exists and if it's attached
        if params['VpcId']:

            # check that the vpc_id exists. If not, an exception is thrown
            vpc = find_vpc(client, module)
            current_vpc_attachments = existing_vgw[0]['VpcAttachments']

            if current_vpc_attachments != [] and current_vpc_attachments[0]['State'] == 'attached':
                if current_vpc_attachments[0]['VpcId'] == params['VpcId'] and current_vpc_attachments[0]['State'] == 'attached':
                    # already attached to the requested VPC - nothing to do
                    changed = False
                else:

                    # detach the existing vpc from the virtual gateway
                    vpc_to_detach = current_vpc_attachments[0]['VpcId']
                    detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
                    # brief pause between detach completing and re-attach
                    time.sleep(5)
                    attached_vgw = attach_vgw(client, module, vpn_gateway_id)
                    vgw = find_vgw(client, module, [vpn_gateway_id])
                    changed = True
            else:
                # attach the vgw to the supplied vpc
                attached_vgw = attach_vgw(client, module, vpn_gateway_id)
                vgw = find_vgw(client, module, [vpn_gateway_id])
                changed = True

        # if params['VpcId'] is not provided, check the vgw is attached to a vpc. if so, detach it.
        else:
            existing_vgw = find_vgw(client, module, [vpn_gateway_id])

            if existing_vgw[0]['VpcAttachments'] != []:
                if existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
                    # detach the vpc from the vgw
                    vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
                    detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
                    changed = True

            # re-read to report the gateway's current state
            vgw = find_vgw(client, module, [vpn_gateway_id])

    else:
        # create a new vgw
        new_vgw = create_vgw(client, module)
        changed = True
        vpn_gateway_id = new_vgw['VpnGateway']['VpnGatewayId']

        # tag the new virtual gateway
        create_tags(client, module, vpn_gateway_id)

        # return current state of the vgw
        vgw = find_vgw(client, module, [vpn_gateway_id])

        # if a vpc-id was supplied, attempt to attach it to the vgw
        if params['VpcId']:
            attached_vgw = attach_vgw(client, module, vpn_gateway_id)
            changed = True
            vgw = find_vgw(client, module, [vpn_gateway_id])

    result = get_vgw_info(vgw)
    return changed, result
+
+
def ensure_vgw_absent(client, module):
    """Ensure no matching virtual gateway remains.

    Matches by 'vpn_gateway_id' when supplied, otherwise by name + type.
    Any attached VPC is detached first; if a 'vpc_id' was supplied it must
    match the actual attachment or the module fails.  Returns
    (changed, deleted gateway id), or (False, "Nothing to do"/None) when no
    matching gateway exists.
    """

    changed = False
    params = dict()
    result = dict()
    params['Name'] = module.params.get('name')
    params['VpcId'] = module.params.get('vpc_id')
    params['Type'] = module.params.get('type')
    params['Tags'] = module.params.get('tags')
    params['VpnGatewayIds'] = module.params.get('vpn_gateway_id')

    # check if a gateway matching our module args already exists
    if params['VpnGatewayIds']:
        existing_vgw_with_id = find_vgw(client, module, [params['VpnGatewayIds']])
        if existing_vgw_with_id != [] and existing_vgw_with_id[0]['State'] != 'deleted':
            existing_vgw = existing_vgw_with_id
            if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
                if params['VpcId']:
                    # a vpc_id arg must name the VPC actually attached
                    if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
                        module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')

                    else:
                        # detach the vpc from the vgw
                        detach_vgw(client, module, params['VpnGatewayIds'], params['VpcId'])
                        deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
                        changed = True

                else:
                    # attempt to detach any attached vpcs
                    vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
                    detach_vgw(client, module, params['VpnGatewayIds'], vpc_to_detach)
                    deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
                    changed = True

            else:
                # no vpc's are attached so attempt to delete the vgw
                deleted_vgw = delete_vgw(client, module, params['VpnGatewayIds'])
                changed = True

        else:
            # the id named a gateway that is missing or already deleted
            changed = False
            deleted_vgw = "Nothing to do"

    else:
        # Check that a name and type argument has been supplied if no vgw-id.
        # TODO(review): 'suppled' typo in the message below - left as-is here
        # since doc-only changes must not alter runtime strings.
        if not module.params.get('name') or not module.params.get('type'):
            module.fail_json(msg='A name and type is required when no vgw-id and a status of \'absent\' is suppled')

        existing_vgw = find_vgw(client, module)
        if existing_vgw != [] and existing_vgw[0]['State'] != 'deleted':
            vpn_gateway_id = existing_vgw[0]['VpnGatewayId']
            if existing_vgw[0]['VpcAttachments'] != [] and existing_vgw[0]['VpcAttachments'][0]['State'] == 'attached':
                if params['VpcId']:
                    # a vpc_id arg must name the VPC actually attached
                    if params['VpcId'] != existing_vgw[0]['VpcAttachments'][0]['VpcId']:
                        module.fail_json(msg='The vpc-id provided does not match the vpc-id currently attached - please check the AWS console')

                    else:
                        # detach the vpc from the vgw
                        detach_vgw(client, module, vpn_gateway_id, params['VpcId'])

                        # now that the vpc has been detached, delete the vgw
                        deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
                        changed = True

                else:
                    # attempt to detach any attached vpcs
                    vpc_to_detach = existing_vgw[0]['VpcAttachments'][0]['VpcId']
                    detach_vgw(client, module, vpn_gateway_id, vpc_to_detach)
                    changed = True

                    # now that the vpc has been detached, delete the vgw
                    deleted_vgw = delete_vgw(client, module, vpn_gateway_id)

            else:
                # no vpc's are attached so attempt to delete the vgw
                deleted_vgw = delete_vgw(client, module, vpn_gateway_id)
                changed = True

        else:
            # nothing matched by name/type - nothing to delete
            changed = False
            deleted_vgw = None

    result = deleted_vgw
    return changed, result
+
+
def main():
    """Module entry point: ensure the virtual gateway is present or absent."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        region=dict(required=True),
        name=dict(),
        vpn_gateway_id=dict(),
        vpc_id=dict(),
        wait_timeout=dict(type='int', default=320),
        type=dict(default='ipsec.1', choices=['ipsec.1']),
        tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='json and boto3 is required.')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='ec2',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.NoCredentialsError:
        e = get_exception()
        module.fail_json(msg="Can't authorize connection - " + str(e))

    if module.params.get('state').lower() == 'present':
        changed, results = ensure_vgw_present(client, module)
    else:
        changed, results = ensure_vgw_absent(client, module)
    module.exit_json(changed=changed, vgw=results)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
+
diff --git a/lib/ansible/modules/cloud/amazon/ec2_win_password.py b/lib/ansible/modules/cloud/amazon/ec2_win_password.py
new file mode 100644
index 0000000000..4d246d4367
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ec2_win_password.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ec2_win_password
+short_description: gets the default administrator password for ec2 windows instances
+description:
+ - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. i-XXXXXXX). This module has a dependency on python-boto.
+version_added: "2.0"
+author: "Rick Mendes (@rickmendes)"
+options:
+ instance_id:
+ description:
+ - The instance id to get the password data from.
+ required: true
+ key_file:
+ description:
+ - Path to the file containing the key pair used on the instance.
+ required: true
+ key_passphrase:
+ version_added: "2.0"
+ description:
+ - The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to convert your password protected keys if they do not use DES or 3DES. ex) openssl rsa -in current_key -out new_key -des3.
+ required: false
+ default: null
+ wait:
+ version_added: "2.0"
+ description:
+ - Whether or not to wait for the password to be available before returning.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ version_added: "2.0"
+ description:
+ - Number of seconds to wait before giving up.
+ required: false
+ default: 120
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Example of getting a password
+tasks:
+- name: get the Administrator password
+ ec2_win_password:
+ profile: my-boto-profile
+ instance_id: i-XXXXXX
+ region: us-east-1
+ key_file: "~/aws-creds/my_test_key.pem"
+
+# Example of getting a password with a password protected key
+tasks:
+- name: get the Administrator password
+ ec2_win_password:
+ profile: my-boto-profile
+ instance_id: i-XXXXXX
+ region: us-east-1
+ key_file: "~/aws-creds/my_protected_test_key.pem"
+ key_passphrase: "secret"
+
+# Example of waiting for a password
+tasks:
+- name: get the Administrator password
+ ec2_win_password:
+ profile: my-boto-profile
+ instance_id: i-XXXXXX
+ region: us-east-1
+ key_file: "~/aws-creds/my_test_key.pem"
+ wait: yes
+ wait_timeout: 45
+'''
+
+from base64 import b64decode
+from os.path import expanduser
+from Crypto.Cipher import PKCS1_v1_5
+from Crypto.PublicKey import RSA
+import datetime
+
+try:
+ import boto.ec2
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
def main():
    """Fetch and decrypt the Windows Administrator password for an instance."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        instance_id=dict(required=True),
        key_file=dict(required=True),
        key_passphrase=dict(no_log=True, default=None, required=False),
        wait=dict(type='bool', default=False, required=False),
        wait_timeout=dict(default=120, required=False),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='Boto required for this module.')

    # 'time' is only needed for the wait poll loop and is not pulled in by the
    # file-level imports - importing it here fixes a NameError when wait=yes.
    import time

    instance_id = module.params.get('instance_id')
    key_file = expanduser(module.params.get('key_file'))
    key_passphrase = module.params.get('key_passphrase')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))

    ec2 = ec2_connect(module)

    if wait:
        start = datetime.datetime.now()
        end = start + datetime.timedelta(seconds=wait_timeout)

        # Poll every 5 seconds until password data appears or we time out.
        while datetime.datetime.now() < end:
            data = ec2.get_password_data(instance_id)
            decoded = b64decode(data)
            if not decoded:
                time.sleep(5)
            else:
                break
    else:
        data = ec2.get_password_data(instance_id)
        decoded = b64decode(data)

    if wait and datetime.datetime.now() >= end:
        module.fail_json(msg="wait for password timeout after %d seconds" % wait_timeout)

    try:
        f = open(key_file, 'r')
    except IOError as e:
        module.fail_json(msg="I/O error (%d) opening key file: %s" % (e.errno, e.strerror))
    else:
        try:
            with f:
                key = RSA.importKey(f.read(), key_passphrase)
        except (ValueError, IndexError, TypeError):
            module.fail_json(msg="unable to parse key file")

    cipher = PKCS1_v1_5.new(key)
    # PKCS1_v1_5.decrypt returns this sentinel instead of raising on failure.
    sentinel = 'password decryption failed!!!'

    try:
        decrypted = cipher.decrypt(decoded, sentinel)
    except ValueError:
        decrypted = None

    # 'is None' replaces the original '== None' identity-vs-equality misuse.
    if decrypted is None:
        module.exit_json(win_password='', changed=False)
    elif wait:
        elapsed = datetime.datetime.now() - start
        module.exit_json(win_password=decrypted, changed=True, elapsed=elapsed.seconds)
    else:
        module.exit_json(win_password=decrypted, changed=True)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/ecs_cluster.py b/lib/ansible/modules/cloud/amazon/ecs_cluster.py
new file mode 100644
index 0000000000..b1409005a8
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ecs_cluster.py
@@ -0,0 +1,243 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ecs_cluster
+short_description: create or terminate ecs clusters
+notes:
+ - When deleting a cluster, the information returned is the state of the cluster prior to deletion.
+ - It will also wait for a cluster to have instances registered to it.
+description:
+ - Creates or terminates ecs clusters.
+version_added: "2.0"
+author: Mark Chance(@Java1Guy)
+requirements: [ boto, boto3 ]
+options:
+ state:
+ description:
+ - The desired state of the cluster
+ required: true
+ choices: ['present', 'absent', 'has_instances']
+ name:
+ description:
+ - The cluster name
+ required: true
+ delay:
+ description:
+ - Number of seconds to wait
+ required: false
+ repeat:
+ description:
+ - The number of times to wait for the cluster to have an instance
+ required: false
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Cluster creation
+- ecs_cluster:
+ name: default
+ state: present
+
+# Cluster deletion
+- ecs_cluster:
+ name: default
+ state: absent
+
+- name: Wait for register
+ ecs_cluster:
+ name: "{{ new_cluster }}"
+ state: has_instances
+ delay: 10
+ repeat: 10
+ register: task_output
+
+'''
+RETURN = '''
+activeServicesCount:
+ description: how many services are active in this cluster
+ returned: 0 if a new cluster
+ type: int
+clusterArn:
+ description: the ARN of the cluster just created
+ type: string (ARN)
+ sample: arn:aws:ecs:us-west-2:172139249013:cluster/test-cluster-mfshcdok
+clusterName:
+ description: name of the cluster just created (should match the input argument)
+ type: string
+ sample: test-cluster-mfshcdok
+pendingTasksCount:
+ description: how many tasks are waiting to run in this cluster
+ returned: 0 if a new cluster
+ type: int
+registeredContainerInstancesCount:
+ description: how many container instances are available in this cluster
+ returned: 0 if a new cluster
+ type: int
+runningTasksCount:
+ description: how many tasks are running in this cluster
+ returned: 0 if a new cluster
+ type: int
+status:
+ description: the status of the new cluster
+ returned: ACTIVE
+ type: string
+'''
+import time
+
+try:
+ import boto
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
+
+
class EcsClusterManager:
    """Thin wrapper around the boto3 ECS client for cluster operations."""

    def __init__(self, module):
        self.module = module

        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            if not region:
                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
            self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except boto.exception.NoAuthHandlerFound as e:
            self.module.fail_json(msg="Can't authorize connection - %s" % str(e))

    def find_in_array(self, array_of_clusters, cluster_name, field_name='clusterArn'):
        """Return the first entry whose *field_name* ends with cluster_name, else None."""
        for candidate in array_of_clusters:
            if candidate[field_name].endswith(cluster_name):
                return candidate
        return None

    def describe_cluster(self, cluster_name):
        """Return the cluster description, None if the cluster is missing, or raise."""
        response = self.ecs.describe_clusters(clusters=[cluster_name])
        if response['failures']:
            failure = self.find_in_array(response['failures'], cluster_name, 'arn')
            if failure and failure['reason'] == 'MISSING':
                return None
            # otherwise fall through and look at any clusters that were found
        if response['clusters']:
            found = self.find_in_array(response['clusters'], cluster_name)
            if found:
                return found
        raise Exception("Unknown problem describing cluster %s." % cluster_name)

    def create_cluster(self, clusterName='default'):
        """Create the cluster (idempotent on the API side) and return its description."""
        return self.ecs.create_cluster(clusterName=clusterName)['cluster']

    def delete_cluster(self, clusterName):
        """Delete the named cluster and return the raw API response."""
        return self.ecs.delete_cluster(cluster=clusterName)
+
def main():
    """Create, delete, or wait for registered instances in an ECS cluster."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent', 'has_instances']),
        name=dict(required=True, type='str'),
        delay=dict(required=False, type='int', default=10),
        repeat=dict(required=False, type='int', default=10)
    ))
    # required_together must be a list of lists of parameter names.  The
    # original passed a flat list; harmless in practice (both parameters are
    # already required=True) but the wrong shape for AnsibleModule.
    required_together = [['state', 'name']]

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required.')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    cluster_mgr = EcsClusterManager(module)
    try:
        existing = cluster_mgr.describe_cluster(module.params['name'])
    except Exception as e:
        module.fail_json(msg="Exception describing cluster '" + module.params['name'] + "': " + str(e))

    results = dict(changed=False)
    if module.params['state'] == 'present':
        if existing and 'status' in existing and existing['status'] == "ACTIVE":
            results['cluster'] = existing
        else:
            if not module.check_mode:
                # doesn't exist. create it.
                results['cluster'] = cluster_mgr.create_cluster(module.params['name'])
            results['changed'] = True

    # delete the cluster
    elif module.params['state'] == 'absent':
        if existing:
            # it exists; report the pre-deletion state back to the caller
            results['cluster'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    cluster_mgr.delete_cluster(module.params['name'])
                results['changed'] = True
    elif module.params['state'] == 'has_instances':
        if not existing:
            module.fail_json(msg="Cluster '" + module.params['name'] + " not found.")
            return
        # poll until at least one container instance has registered
        delay = module.params['delay']
        repeat = module.params['repeat']
        time.sleep(delay)
        count = 0
        for i in range(repeat):
            existing = cluster_mgr.describe_cluster(module.params['name'])
            count = existing['registeredContainerInstancesCount']
            if count > 0:
                results['changed'] = True
                break
            time.sleep(delay)
        # '==' replaces the original 'is': identity comparison of ints is
        # an implementation detail (small-int caching) and not reliable.
        if count == 0 and i == repeat - 1:
            module.fail_json(msg="Cluster instance count still zero after " + str(repeat) + " tries of " + str(delay) + " seconds each.")
            return

    module.exit_json(**results)


if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/amazon/ecs_service.py b/lib/ansible/modules/cloud/amazon/ecs_service.py
new file mode 100644
index 0000000000..004a11b267
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ecs_service.py
@@ -0,0 +1,433 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ecs_service
+short_description: create, terminate, start or stop a service in ecs
+description:
+ - Creates or terminates ecs services.
+notes:
+ - the service role specified must be assumable (i.e. have a trust relationship for the ecs service, ecs.amazonaws.com)
+ - for details of the parameters and returns see U(http://boto3.readthedocs.org/en/latest/reference/services/ecs.html)
+dependencies:
+ - An IAM role must have been created
+version_added: "2.1"
+author:
+ - "Mark Chance (@java1guy)"
+ - "Darek Kaczynski (@kaczynskid)"
+requirements: [ json, boto, botocore, boto3 ]
+options:
+ state:
+ description:
+ - The desired state of the service
+ required: true
+ choices: ["present", "absent", "deleting"]
+ name:
+ description:
+ - The name of the service
+ required: true
+ cluster:
+ description:
+ - The name of the cluster in which the service exists
+ required: false
+ task_definition:
+ description:
+ - The task definition the service will run
+ required: false
+ load_balancers:
+ description:
+ - The list of ELBs defined for this service
+ required: false
+
+ desired_count:
+ description:
+ - The count of how many instances of the service
+ required: false
+ client_token:
+ description:
+ - Unique, case-sensitive identifier you provide to ensure the idempotency of the request. Up to 32 ASCII characters are allowed.
+ required: false
+ role:
+ description:
+ - The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer on your behalf. This parameter is only required if you are using a load balancer with your service.
+ required: false
+ delay:
+ description:
+ - The time to wait before checking that the service is available
+ required: false
+ default: 10
+ repeat:
+ description:
+ - The number of times to check that the service is available
+ required: false
+ default: 10
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+- ecs_service:
+ state: present
+ name: console-test-service
+ cluster: new_cluster
+    task_definition: "new_cluster-task:1"
+ desired_count: 0
+
+# Basic provisioning example
+- ecs_service:
+ name: default
+ state: present
+ cluster: new_cluster
+
+# Simple example to delete
+- ecs_service:
+ name: default
+ state: absent
+ cluster: new_cluster
+'''
+
+RETURN = '''
+service:
+ description: Details of created service.
+ returned: when creating a service
+ type: complex
+ contains:
+ clusterArn:
+      description: The Amazon Resource Name (ARN) of the cluster that hosts the service.
+ returned: always
+ type: string
+ desiredCount:
+ description: The desired number of instantiations of the task definition to keep running on the service.
+ returned: always
+ type: int
+ loadBalancers:
+ description: A list of load balancer objects
+ returned: always
+ type: complex
+ contains:
+ loadBalancerName:
+ description: the name
+ returned: always
+ type: string
+ containerName:
+ description: The name of the container to associate with the load balancer.
+ returned: always
+ type: string
+ containerPort:
+ description: The port on the container to associate with the load balancer.
+ returned: always
+ type: int
+ pendingCount:
+ description: The number of tasks in the cluster that are in the PENDING state.
+ returned: always
+ type: int
+ runningCount:
+ description: The number of tasks in the cluster that are in the RUNNING state.
+ returned: always
+ type: int
+ serviceArn:
+ description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region :012345678910 :service/my-service .
+ returned: always
+ type: string
+ serviceName:
+ description: A user-generated string used to identify the service
+ returned: always
+ type: string
+ status:
+ description: The valid values are ACTIVE, DRAINING, or INACTIVE.
+ returned: always
+ type: string
+ taskDefinition:
+ description: The ARN of a task definition to use for tasks in the service.
+ returned: always
+ type: string
+ deployments:
+ description: list of service deployments
+ returned: always
+ type: list of complex
+ events:
+      description: list of service events
+ returned: always
+ type: list of complex
+ansible_facts:
+ description: Facts about deleted service.
+ returned: when deleting a service
+ type: complex
+ contains:
+ service:
+ description: Details of deleted service in the same structure described above for service creation.
+ returned: when service existed and was deleted
+ type: complex
+'''
+import time
+
+try:
+ import boto
+ import botocore
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
+
+
class EcsServiceManager:
    """Thin wrapper around the boto3 ECS client for service operations."""

    def __init__(self, module):
        self.module = module

        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            if not region:
                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
            self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except boto.exception.NoAuthHandlerFound as e:
            self.module.fail_json(msg="Can't authorize connection - %s" % str(e))

    def find_in_array(self, array_of_services, service_name, field_name='serviceArn'):
        # Return the first entry whose field_name value ends with service_name,
        # or None when nothing matches.
        for c in array_of_services:
            if c[field_name].endswith(service_name):
                return c
        return None

    def describe_service(self, cluster_name, service_name):
        """Return the matching service dict, or None when AWS reports it MISSING.

        Raises Exception when the response contains neither a matching service
        nor a MISSING failure for service_name.
        """
        response = self.ecs.describe_services(
            cluster=cluster_name,
            services=[
                service_name
            ])
        msg = ''
        if len(response['failures']) > 0:
            c = self.find_in_array(response['failures'], service_name, 'arn')
            # BUGFIX: only dereference the failure entry when one was actually
            # found; the original read c['reason'] before checking c for None.
            if c:
                msg += ", failure reason is "+c['reason']
                if c['reason'] == 'MISSING':
                    return None
            # fall thru and look through found ones
        if len(response['services']) > 0:
            c = self.find_in_array(response['services'], service_name)
            if c:
                return c
        # BUGFIX: StandardError does not exist on Python 3; use Exception.
        raise Exception("Unknown problem describing service %s.%s" % (service_name, msg))

    def is_matching_service(self, expected, existing):
        # Compare requested module params against the deployed service;
        # None counts as "no load balancers" / "zero desired count".
        if expected['task_definition'] != existing['taskDefinition']:
            return False

        if (expected['load_balancers'] or []) != existing['loadBalancers']:
            return False

        if (expected['desired_count'] or 0) != existing['desiredCount']:
            return False

        return True

    def create_service(self, service_name, cluster_name, task_definition,
                       load_balancers, desired_count, client_token, role):
        """Create the service and return its JSON-safe description."""
        response = self.ecs.create_service(
            cluster=cluster_name,
            serviceName=service_name,
            taskDefinition=task_definition,
            loadBalancers=load_balancers,
            desiredCount=desired_count,
            clientToken=client_token,
            role=role)
        return self.jsonize(response['service'])

    def update_service(self, service_name, cluster_name, task_definition,
                       load_balancers, desired_count, client_token, role):
        """Update the service and return its JSON-safe description.

        NOTE(review): load_balancers, client_token and role are accepted for
        signature parity with create_service but are not sent to AWS — the
        UpdateService API only accepts taskDefinition and desiredCount.
        """
        response = self.ecs.update_service(
            cluster=cluster_name,
            service=service_name,
            taskDefinition=task_definition,
            desiredCount=desired_count)
        return self.jsonize(response['service'])

    def jsonize(self, service):
        # some fields are datetime which is not JSON serializable
        # make them strings
        if 'deployments' in service:
            for d in service['deployments']:
                if 'createdAt' in d:
                    d['createdAt'] = str(d['createdAt'])
                if 'updatedAt' in d:
                    d['updatedAt'] = str(d['updatedAt'])
        if 'events' in service:
            for e in service['events']:
                if 'createdAt' in e:
                    e['createdAt'] = str(e['createdAt'])
        return service

    def delete_service(self, service, cluster=None):
        """Delete the named service; returns the raw boto3 response."""
        return self.ecs.delete_service(cluster=cluster, service=service)
+
def main():
    """Create, update or delete an ECS service, or wait for its deletion."""

    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent', 'deleting']),
        name=dict(required=True, type='str'),
        cluster=dict(required=False, type='str'),
        task_definition=dict(required=False, type='str'),
        load_balancers=dict(required=False, type='list'),
        desired_count=dict(required=False, type='int'),
        client_token=dict(required=False, type='str'),
        role=dict(required=False, type='str'),
        delay=dict(required=False, type='int', default=10),
        repeat=dict(required=False, type='int', default=10)
    ))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto is required.')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    if module.params['state'] == 'present':
        # BUGFIX: the original used "not 'x' in module.params and ... is None",
        # which is always False because argument_spec guarantees the key exists,
        # so these validations never fired.
        if module.params['task_definition'] is None:
            module.fail_json(msg="To use create a service, a task_definition must be specified")
        if module.params['desired_count'] is None:
            module.fail_json(msg="To use create a service, a desired_count must be specified")

    service_mgr = EcsServiceManager(module)
    try:
        existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
    except Exception as e:
        module.fail_json(msg="Exception describing service '"+module.params['name']+"' in cluster '"+module.params['cluster']+"': "+str(e))

    results = dict(changed=False)
    if module.params['state'] == 'present':

        matching = False
        update = False
        if existing and 'status' in existing and existing['status'] == "ACTIVE":
            if service_mgr.is_matching_service(module.params, existing):
                matching = True
                results['service'] = service_mgr.jsonize(existing)
            else:
                update = True

        if not matching:
            if not module.check_mode:
                # Normalize optional parameters to the values boto3 expects.
                if module.params['load_balancers'] is None:
                    loadBalancers = []
                else:
                    loadBalancers = module.params['load_balancers']
                if module.params['role'] is None:
                    role = ''
                else:
                    role = module.params['role']
                if module.params['client_token'] is None:
                    clientToken = ''
                else:
                    clientToken = module.params['client_token']

                if update:
                    # Service exists but differs from the requested definition.
                    response = service_mgr.update_service(module.params['name'],
                                                          module.params['cluster'],
                                                          module.params['task_definition'],
                                                          loadBalancers,
                                                          module.params['desired_count'],
                                                          clientToken,
                                                          role)
                else:
                    # doesn't exist. create it.
                    response = service_mgr.create_service(module.params['name'],
                                                          module.params['cluster'],
                                                          module.params['task_definition'],
                                                          loadBalancers,
                                                          module.params['desired_count'],
                                                          clientToken,
                                                          role)

                results['service'] = response

            results['changed'] = True

    elif module.params['state'] == 'absent':
        if not existing:
            pass
        else:
            # it exists, so we should delete it and mark changed.
            # return info about the cluster deleted
            del existing['deployments']
            del existing['events']
            results['ansible_facts'] = existing
            if 'status' in existing and existing['status'] == "INACTIVE":
                results['changed'] = False
            else:
                if not module.check_mode:
                    try:
                        service_mgr.delete_service(
                            module.params['name'],
                            module.params['cluster']
                        )
                    except botocore.exceptions.ClientError as e:
                        # BUGFIX: ClientError has no .message attribute on
                        # Python 3; use str(e) instead.
                        module.fail_json(msg=str(e))
                results['changed'] = True

    elif module.params['state'] == 'deleting':
        if not existing:
            # BUGFIX: the closing quote around the service name was missing.
            module.fail_json(msg="Service '"+module.params['name']+"' not found.")
            return
        # Poll until the service reports INACTIVE, giving up after `repeat`
        # attempts of `delay` seconds each.
        delay = module.params['delay']
        repeat = module.params['repeat']
        time.sleep(delay)
        for i in range(repeat):
            # NOTE(review): describe_service may return None once the service
            # is fully gone, which would raise on the next line — confirm
            # against actual AWS behavior.
            existing = service_mgr.describe_service(module.params['cluster'], module.params['name'])
            status = existing['status']
            if status == "INACTIVE":
                results['changed'] = True
                break
            time.sleep(delay)
            # BUGFIX: compare integers with ==, not the identity operator `is`.
            if i == repeat - 1:
                module.fail_json(msg="Service still not deleted after "+str(repeat)+" tries of "+str(delay)+" seconds each.")
                return

    module.exit_json(**results)
+
+
# Dispatch to the module entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/amazon/ecs_service_facts.py b/lib/ansible/modules/cloud/amazon/ecs_service_facts.py
new file mode 100644
index 0000000000..e62b492c4b
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ecs_service_facts.py
@@ -0,0 +1,240 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ecs_service_facts
+short_description: list or describe services in ecs
+notes:
+ - for details of the parameters and returns see U(http://boto3.readthedocs.org/en/latest/reference/services/ecs.html)
+description:
+ - Lists or describes services in ecs.
+version_added: "2.1"
+author:
+ - "Mark Chance (@java1guy)"
+ - "Darek Kaczynski (@kaczynskid)"
+requirements: [ json, boto, botocore, boto3 ]
+options:
+ details:
+ description:
+ - Set this to true if you want detailed information about the services.
+ required: false
+ default: 'false'
+ choices: ['true', 'false']
+ cluster:
+ description:
+ - The cluster ARNS in which to list the services.
+ required: false
+ default: 'default'
+ service:
+ description:
+ - The service to get details for (required if details is true)
+ required: false
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic listing example
+- ecs_service_facts:
+ cluster: test-cluster
+ service: console-test-service
+ details: true
+
+# Basic listing example
+- ecs_service_facts:
+ cluster: test-cluster
+'''
+
+RETURN = '''
+services:
+ description: When details is false, returns an array of service ARNs, otherwise an array of complex objects as described below.
+ returned: success
+ type: list of complex
+ contains:
+ clusterArn:
+      description: The Amazon Resource Name (ARN) of the cluster that hosts the service.
+ returned: always
+ type: string
+ desiredCount:
+ description: The desired number of instantiations of the task definition to keep running on the service.
+ returned: always
+ type: int
+ loadBalancers:
+ description: A list of load balancer objects
+ returned: always
+ type: complex
+ contains:
+ loadBalancerName:
+ description: the name
+ returned: always
+ type: string
+ containerName:
+ description: The name of the container to associate with the load balancer.
+ returned: always
+ type: string
+ containerPort:
+ description: The port on the container to associate with the load balancer.
+ returned: always
+ type: int
+ pendingCount:
+ description: The number of tasks in the cluster that are in the PENDING state.
+ returned: always
+ type: int
+ runningCount:
+ description: The number of tasks in the cluster that are in the RUNNING state.
+ returned: always
+ type: int
+ serviceArn:
+ description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region :012345678910 :service/my-service .
+ returned: always
+ type: string
+ serviceName:
+ description: A user-generated string used to identify the service
+ returned: always
+ type: string
+ status:
+ description: The valid values are ACTIVE, DRAINING, or INACTIVE.
+ returned: always
+ type: string
+ taskDefinition:
+ description: The ARN of a task definition to use for tasks in the service.
+ returned: always
+ type: string
+ deployments:
+ description: list of service deployments
+ returned: always
+ type: list of complex
+ events:
+      description: list of service events
+ returned: always
+ type: list of complex
+'''
+try:
+ import boto
+ import botocore
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
+
+
class EcsServiceManager:
    """Thin wrapper around the boto3 ECS client used to list/describe services."""

    def __init__(self, module):
        self.module = module

        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            if not region:
                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
            self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except boto.exception.NoAuthHandlerFound as e:
            self.module.fail_json(msg="Can't authorize connection - %s" % str(e))

    def list_services(self, cluster):
        """Return dict(services=<list of service ARNs>) for the given cluster."""
        fn_args = dict()
        # Only pass the cluster when one was given; the API defaults to 'default'.
        if cluster:
            fn_args['cluster'] = cluster
        response = self.ecs.list_services(**fn_args)
        relevant_response = dict(services=response['serviceArns'])
        return relevant_response

    def describe_services(self, cluster, services):
        """Describe one or more services given as a comma-separated string."""
        fn_args = dict()
        if cluster:
            fn_args['cluster'] = cluster
        fn_args['services'] = services.split(",")
        response = self.ecs.describe_services(**fn_args)
        # BUGFIX: on Python 3, map() returns a lazy iterator that is not JSON
        # serializable; materialize a real list instead.
        relevant_response = dict(services=[self.extract_service_from(service)
                                           for service in response['services']])
        if 'failures' in response and len(response['failures']) > 0:
            relevant_response['services_not_running'] = response['failures']
        return relevant_response

    def extract_service_from(self, service):
        # some fields are datetime which is not JSON serializable
        # make them strings
        if 'deployments' in service:
            for d in service['deployments']:
                if 'createdAt' in d:
                    d['createdAt'] = str(d['createdAt'])
                if 'updatedAt' in d:
                    d['updatedAt'] = str(d['updatedAt'])
        if 'events' in service:
            for e in service['events']:
                if 'createdAt' in e:
                    e['createdAt'] = str(e['createdAt'])
        return service
+
def main():
    """List ECS services, or describe them in detail when `details` is true."""
    spec = ec2_argument_spec()
    spec.update(dict(
        details=dict(required=False, type='bool', default=False),
        cluster=dict(required=False, type='str'),
        service=dict(required=False, type='str')
    ))

    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)

    # Both SDK generations must be importable before we touch AWS.
    if not HAS_BOTO:
        module.fail_json(msg='boto is required.')
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    task_mgr = EcsServiceManager(module)

    if module.params.get('details', False):
        # Detailed mode requires a concrete service name.
        if not module.params.get('service'):
            module.fail_json(msg="service must be specified for ecs_service_facts")
        ecs_facts = task_mgr.describe_services(module.params['cluster'], module.params['service'])
    else:
        ecs_facts = task_mgr.list_services(module.params['cluster'])

    module.exit_json(changed=False, ansible_facts=ecs_facts)
+
+
# Dispatch to the module entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/amazon/ecs_task.py b/lib/ansible/modules/cloud/amazon/ecs_task.py
new file mode 100644
index 0000000000..a8ecc4dde4
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ecs_task.py
@@ -0,0 +1,329 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ecs_task
+short_description: run, start or stop a task in ecs
+description:
+ - Creates or deletes instances of task definitions.
+version_added: "2.0"
+author: Mark Chance(@Java1Guy)
+requirements: [ json, boto, botocore, boto3 ]
+options:
+ operation:
+ description:
+ - Which task operation to execute
+ required: True
+ choices: ['run', 'start', 'stop']
+ cluster:
+ description:
+ - The name of the cluster to run the task on
+ required: False
+ task_definition:
+ description:
+ - The task definition to start or run
+ required: False
+ overrides:
+ description:
+ - A dictionary of values to pass to the new instances
+ required: False
+ count:
+ description:
+ - How many new instances to start
+ required: False
+ task:
+ description:
+ - The task to stop
+ required: False
+ container_instances:
+ description:
+ - The list of container instances on which to deploy the task
+ required: False
+ started_by:
+ description:
+ - A value showing who or what started the task (for informational purposes)
+ required: False
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Simple example of run task
+- name: Run task
+ ecs_task:
+ operation: run
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ count: 1
+ started_by: ansible_user
+ register: task_output
+
+# Simple example of start task
+
+- name: Start a task
+ ecs_task:
+ operation: start
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+ container_instances:
+ - arn:aws:ecs:us-west-2:172139249013:container-instance/79c23f22-876c-438a-bddf-55c98a3538a8
+ started_by: ansible_user
+ register: task_output
+
+- name: Stop a task
+ ecs_task:
+ operation: stop
+ cluster: console-sample-app-static-cluster
+ task_definition: console-sample-app-static-taskdef
+ task: "arn:aws:ecs:us-west-2:172139249013:task/3f8353d1-29a8-4689-bbf6-ad79937ffe8a"
+'''
+RETURN = '''
+task:
+  description: details about the task that was started
+ returned: success
+ type: complex
+ contains:
+ taskArn:
+ description: The Amazon Resource Name (ARN) that identifies the task.
+ returned: always
+ type: string
+ clusterArn:
+      description: The Amazon Resource Name (ARN) of the cluster that hosts the task.
+ returned: only when details is true
+ type: string
+ taskDefinitionArn:
+ description: The Amazon Resource Name (ARN) of the task definition.
+ returned: only when details is true
+ type: string
+ containerInstanceArn:
+ description: The Amazon Resource Name (ARN) of the container running the task.
+ returned: only when details is true
+ type: string
+ overrides:
+ description: The container overrides set for this task.
+ returned: only when details is true
+ type: list of complex
+ lastStatus:
+ description: The last recorded status of the task.
+ returned: only when details is true
+ type: string
+ desiredStatus:
+ description: The desired status of the task.
+ returned: only when details is true
+ type: string
+ containers:
+ description: The container details.
+ returned: only when details is true
+ type: list of complex
+ startedBy:
+      description: The user who started the task.
+ returned: only when details is true
+ type: string
+ stoppedReason:
+ description: The reason why the task was stopped.
+ returned: only when details is true
+ type: string
+ createdAt:
+ description: The timestamp of when the task was created.
+ returned: only when details is true
+ type: string
+ startedAt:
+ description: The timestamp of when the task was started.
+ returned: only when details is true
+ type: string
+ stoppedAt:
+ description: The timestamp of when the task was stopped.
+ returned: only when details is true
+ type: string
+'''
+try:
+ import boto
+ import botocore
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
+
+
class EcsExecManager:
    """Handles ECS Tasks"""

    def __init__(self, module):
        self.module = module

        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            if not region:
                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
            self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except boto.exception.NoAuthHandlerFound as e:
            module.fail_json(msg="Can't authorize connection - %s " % str(e))

    def list_tasks(self, cluster_name, service_name, status):
        """Return the first task ARN in the cluster/family with the desired
        status whose ARN ends with service_name, or None when none matches."""
        listing = self.ecs.list_tasks(
            cluster=cluster_name,
            family=service_name,
            desiredStatus=status
        )
        for arn in listing['taskArns']:
            if arn.endswith(service_name):
                return arn
        return None

    def run_task(self, cluster, task_definition, overrides, count, startedBy):
        """Run `count` instances of the task definition; returns the tasks list."""
        response = self.ecs.run_task(
            cluster=cluster,
            taskDefinition=task_definition,
            overrides=overrides if overrides is not None else dict(),
            count=count,
            startedBy=startedBy)
        # include tasks and failures
        return response['tasks']

    def start_task(self, cluster, task_definition, overrides, container_instances, startedBy):
        """Start the task on specific container instances; returns the tasks list."""
        candidates = [
            ('cluster', cluster),
            ('taskDefinition', task_definition),
            ('overrides', overrides),
            ('containerInstances', container_instances),
            ('startedBy', startedBy),
        ]
        # Only forward the arguments the caller actually supplied.
        args = dict((name, value) for name, value in candidates if value)
        response = self.ecs.start_task(**args)
        # include tasks and failures
        return response['tasks']

    def stop_task(self, cluster, task):
        """Stop the given task; returns its description."""
        return self.ecs.stop_task(cluster=cluster, task=task)['task']
+
def main():
    """Run, start, or stop an ECS task depending on `operation`."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        operation=dict(required=True, choices=['run', 'start', 'stop']),
        cluster=dict(required=False, type='str'),              # R S P
        task_definition=dict(required=False, type='str'),      # R* S*
        overrides=dict(required=False, type='dict'),           # R S
        count=dict(required=False, type='int'),                # R
        task=dict(required=False, type='str'),                 # P*
        container_instances=dict(required=False, type='list'), # S*
        started_by=dict(required=False, type='str')            # R S
    ))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # Validate Requirements
    if not HAS_BOTO:
        module.fail_json(msg='boto is required.')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    # Validate Inputs
    # BUGFIX: the original checks read "not 'x' in module.params and ... is None",
    # which is always False because argument_spec guarantees every key exists,
    # so none of these validations ever fired.
    if module.params['operation'] == 'run':
        if module.params['task_definition'] is None:
            module.fail_json(msg="To run a task, a task_definition must be specified")
        task_to_list = module.params['task_definition']
        status_type = "RUNNING"

    if module.params['operation'] == 'start':
        if module.params['task_definition'] is None:
            module.fail_json(msg="To start a task, a task_definition must be specified")
        if module.params['container_instances'] is None:
            module.fail_json(msg="To start a task, container instances must be specified")
        task_to_list = module.params['task']
        status_type = "RUNNING"

    if module.params['operation'] == 'stop':
        if module.params['task'] is None:
            module.fail_json(msg="To stop a task, a task must be specified")
        if module.params['task_definition'] is None:
            module.fail_json(msg="To stop a task, a task definition must be specified")
        task_to_list = module.params['task_definition']
        status_type = "STOPPED"

    service_mgr = EcsExecManager(module)
    # Look for an already-matching task so repeated runs stay idempotent.
    existing = service_mgr.list_tasks(module.params['cluster'], task_to_list, status_type)

    results = dict(changed=False)
    if module.params['operation'] == 'run':
        if existing:
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.run_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['count'],
                    module.params['started_by'])
            results['changed'] = True

    elif module.params['operation'] == 'start':
        if existing:
            # TBD - validate the rest of the details
            results['task'] = existing
        else:
            if not module.check_mode:
                results['task'] = service_mgr.start_task(
                    module.params['cluster'],
                    module.params['task_definition'],
                    module.params['overrides'],
                    module.params['container_instances'],
                    module.params['started_by']
                )
            results['changed'] = True

    elif module.params['operation'] == 'stop':
        if existing:
            results['task'] = existing
        else:
            if not module.check_mode:
                # Task is not in the STOPPED list yet, so stop it now and
                # report the stopped task's details.
                results['task'] = service_mgr.stop_task(
                    module.params['cluster'],
                    module.params['task']
                )
            results['changed'] = True

    module.exit_json(**results)
+
+
# Dispatch to the module entry point when executed directly by Ansible.
if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py b/lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py
new file mode 100644
index 0000000000..4ee9003aab
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py
@@ -0,0 +1,344 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ecs_taskdefinition
+short_description: register a task definition in ecs
+description:
+ - Creates or terminates task definitions
+version_added: "2.0"
+author: Mark Chance(@Java1Guy)
+requirements: [ json, boto, botocore, boto3 ]
+options:
+ state:
+ description:
+ - State whether the task definition should exist or be deleted
+ required: true
+ choices: ['present', 'absent']
+ arn:
+ description:
+ - The arn of the task description to delete
+ required: false
+ family:
+ description:
+ - A Name that would be given to the task definition
+ required: false
+ revision:
+ description:
+ - A revision number for the task definition
+ required: False
+ type: int
+ containers:
+ description:
+ - A list of containers definitions
+ required: False
+ type: list of dicts with container definitions
+ volumes:
+ description:
+ - A list of names of volumes to be attached
+ required: False
+ type: list of name
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+- name: "Create task definition"
+ ecs_taskdefinition:
+ containers:
+ - name: simple-app
+ cpu: 10
+ essential: true
+ image: "httpd:2.4"
+ memory: 300
+ mountPoints:
+ - containerPath: /usr/local/apache2/htdocs
+ sourceVolume: my-vol
+ portMappings:
+ - containerPort: 80
+ hostPort: 80
+ - name: busybox
+ command:
+ - "/bin/sh -c \"while true; do echo '<html> <head> <title>Amazon ECS Sample App</title> <style>body {margin-top: 40px; background-color: #333;} </style> </head><body> <div style=color:white;text-align:center> <h1>Amazon ECS Sample App</h1> <h2>Congratulations!</h2> <p>Your application is now running on a container in Amazon ECS.</p>' > top; /bin/date > date ; echo '</div></body></html>' > bottom; cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done\""
+ cpu: 10
+ entryPoint:
+ - sh
+ - "-c"
+ essential: false
+ image: busybox
+ memory: 200
+ volumesFrom:
+ - sourceContainer: simple-app
+ volumes:
+ - name: my-vol
+ family: test-cluster-taskdef
+ state: present
+ register: task_output
+'''
+RETURN = '''
+taskdefinition:
+ description: a reflection of the input parameters
+ type: dict inputs plus revision, status, taskDefinitionArn
+'''
+try:
+ import boto
+ import botocore
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
+
+
+class EcsTaskManager:
+ """Handles ECS Tasks"""
+
+ def __init__(self, module):
+ self.module = module
+
+ try:
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+ if not region:
+ module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
+ self.ecs = boto3_conn(module, conn_type='client', resource='ecs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
+ except boto.exception.NoAuthHandlerFound as e:
+ module.fail_json(msg="Can't authorize connection - %s" % str(e))
+
+ def describe_task(self, task_name):
+ try:
+ response = self.ecs.describe_task_definition(taskDefinition=task_name)
+ return response['taskDefinition']
+ except botocore.exceptions.ClientError:
+ return None
+
+ def register_task(self, family, container_definitions, volumes):
+ response = self.ecs.register_task_definition(family=family,
+ containerDefinitions=container_definitions, volumes=volumes)
+ return response['taskDefinition']
+
+ def describe_task_definitions(self, family):
+ data = {
+ "taskDefinitionArns": [],
+ "nextToken": None
+ }
+
+ def fetch():
+ # Boto3 is weird about params passed, so only pass nextToken if we have a value
+ params = {
+ 'familyPrefix': family
+ }
+
+ if data['nextToken']:
+ params['nextToken'] = data['nextToken']
+
+ result = self.ecs.list_task_definitions(**params)
+ data['taskDefinitionArns'] += result['taskDefinitionArns']
+ data['nextToken'] = result.get('nextToken', None)
+ return data['nextToken'] is not None
+
+ # Fetch all the arns, possibly across multiple pages
+ while fetch():
+ pass
+
+ # Return the full descriptions of the task definitions, sorted ascending by revision
+ return list(sorted([self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']], key=lambda td: td['revision']))
+
+ def deregister_task(self, taskArn):
+ response = self.ecs.deregister_task_definition(taskDefinition=taskArn)
+ return response['taskDefinition']
+
+def main():
+
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ state=dict(required=True, choices=['present', 'absent']),
+ arn=dict(required=False, type='str'),
+ family=dict(required=False, type='str'),
+ revision=dict(required=False, type='int'),
+ containers=dict(required=False, type='list'),
+ volumes=dict(required=False, type='list')
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ if not HAS_BOTO:
+ module.fail_json(msg='boto is required.')
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 is required.')
+
+ task_to_describe = None
+ task_mgr = EcsTaskManager(module)
+ results = dict(changed=False)
+
+ if module.params['state'] == 'present':
+ if 'containers' not in module.params or not module.params['containers']:
+ module.fail_json(msg="To use task definitions, a list of containers must be specified")
+
+ if 'family' not in module.params or not module.params['family']:
+ module.fail_json(msg="To use task definitions, a family must be specified")
+
+ family = module.params['family']
+ existing_definitions_in_family = task_mgr.describe_task_definitions(module.params['family'])
+
+ if 'revision' in module.params and module.params['revision']:
+ # The definition specifies revision. We must gurantee that an active revision of that number will result from this.
+ revision = int(module.params['revision'])
+
+ # A revision has been explicitly specified. Attempt to locate a matching revision
+ tasks_defs_for_revision = [td for td in existing_definitions_in_family if td['revision'] == revision]
+ existing = tasks_defs_for_revision[0] if len(tasks_defs_for_revision) > 0 else None
+
+ if existing and existing['status'] != "ACTIVE":
+ # We cannot reactivate an inactive revision
+ module.fail_json(msg="A task in family '%s' already exists for revsion %d, but it is inactive" % (family, revision))
+ elif not existing:
+ if len(existing_definitions_in_family) == 0 and revision != 1:
+ module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision)
+ elif existing_definitions_in_family[-1]['revision'] + 1 != revision:
+ module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" % (revision, existing_definitions_in_family[-1]['revision'] + 1))
+ else:
+ existing = None
+
+ def _right_has_values_of_left(left, right):
+ # Make sure the values are equivalent for everything left has
+ for k, v in left.items():
+ if not ((not v and (k not in right or not right[k])) or (k in right and v == right[k])):
+ # We don't care about list ordering because ECS can change things
+ if isinstance(v, list) and k in right:
+ left_list = v
+ right_list = right[k] or []
+
+ if len(left_list) != len(right_list):
+ return False
+
+ for list_val in left_list:
+ if list_val not in right_list:
+ return False
+ else:
+ return False
+
+ # Make sure right doesn't have anything that left doesn't
+ for k, v in right.items():
+ if v and k not in left:
+ return False
+
+ return True
+
+ def _task_definition_matches(requested_volumes, requested_containers, existing_task_definition):
+ if td['status'] != "ACTIVE":
+ return None
+
+ existing_volumes = td.get('volumes', []) or []
+
+ if len(requested_volumes) != len(existing_volumes):
+ # Nope.
+ return None
+
+ if len(requested_volumes) > 0:
+ for requested_vol in requested_volumes:
+ found = False
+
+ for actual_vol in existing_volumes:
+ if _right_has_values_of_left(requested_vol, actual_vol):
+ found = True
+ break
+
+ if not found:
+ return None
+
+ existing_containers = td.get('containerDefinitions', []) or []
+
+ if len(requested_containers) != len(existing_containers):
+ # Nope.
+ return None
+
+ for requested_container in requested_containers:
+ found = False
+
+ for actual_container in existing_containers:
+ if _right_has_values_of_left(requested_container, actual_container):
+ found = True
+ break
+
+ if not found:
+ return None
+
+ return existing_task_definition
+
+ # No revision explicitly specified. Attempt to find an active, matching revision that has all the properties requested
+ for td in existing_definitions_in_family:
+ requested_volumes = module.params.get('volumes', []) or []
+ requested_containers = module.params.get('containers', []) or []
+ existing = _task_definition_matches(requested_volumes, requested_containers, td)
+
+ if existing:
+ break
+
+ if existing:
+ # Awesome. Have an existing one. Nothing to do.
+ results['taskdefinition'] = existing
+ else:
+ if not module.check_mode:
+ # Doesn't exist. create it.
+ volumes = module.params.get('volumes', []) or []
+ results['taskdefinition'] = task_mgr.register_task(module.params['family'],
+ module.params['containers'], volumes)
+ results['changed'] = True
+
+ elif module.params['state'] == 'absent':
+ # When de-registering a task definition, we can specify the ARN OR the family and revision.
+ if module.params['state'] == 'absent':
+ if 'arn' in module.params and module.params['arn'] is not None:
+ task_to_describe = module.params['arn']
+ elif 'family' in module.params and module.params['family'] is not None and 'revision' in module.params and \
+ module.params['revision'] is not None:
+ task_to_describe = module.params['family'] + ":" + str(module.params['revision'])
+ else:
+ module.fail_json(msg="To use task definitions, an arn or family and revision must be specified")
+
+ existing = task_mgr.describe_task(task_to_describe)
+
+ if not existing:
+ pass
+ else:
+ # It exists, so we should delete it and mark changed. Return info about the task definition deleted
+ results['taskdefinition'] = existing
+ if 'status' in existing and existing['status'] == "INACTIVE":
+ results['changed'] = False
+ else:
+ if not module.check_mode:
+ task_mgr.deregister_task(task_to_describe)
+ results['changed'] = True
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/efs.py b/lib/ansible/modules/cloud/amazon/efs.py
new file mode 100644
index 0000000000..1def68daed
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/efs.py
@@ -0,0 +1,630 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: efs
+short_description: create and maintain EFS file systems
+description:
+ - Module allows create, search and destroy Amazon EFS file systems
+version_added: "2.2"
+requirements: [ boto3 ]
+author:
+ - "Ryan Sydnor (@ryansydnor)"
+ - "Artem Kazakov (@akazakov)"
+options:
+ state:
+ description:
+ - Allows to create, search and destroy Amazon EFS file system
+ required: false
+ default: 'present'
+ choices: ['present', 'absent']
+ name:
+ description:
+ - Creation Token of Amazon EFS file system. Required for create. Either name or ID required for delete.
+ required: false
+ default: None
+ id:
+ description:
+ - ID of Amazon EFS. Either name or ID required for delete.
+ required: false
+ default: None
+ performance_mode:
+ description:
+ - File system's performance mode to use. Only takes effect during creation.
+ required: false
+ default: 'general_purpose'
+ choices: ['general_purpose', 'max_io']
+ tags:
+ description:
+ - "List of tags of Amazon EFS. Should be defined as dictionary
+ In case of 'present' state with list of tags and existing EFS (matched by 'name'), tags of EFS will be replaced with provided data."
+ required: false
+ default: None
+ targets:
+ description:
+ - "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes:
+ - subnet_id - Mandatory. The ID of the subnet to add the mount target in.
+ - ip_address - Optional. A valid IPv4 address within the address range of the specified subnet.
+ - security_groups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified
+ This data may be modified for existing EFS using state 'present' and new list of mount targets."
+ required: false
+ default: None
+ wait:
+ description:
+ - "In case of 'present' state should wait for EFS 'available' life cycle state (of course, if current state not 'deleting' or 'deleted')
+ In case of 'absent' state should wait for EFS 'deleted' life cycle state"
+ required: false
+ default: "no"
+ choices: ["yes", "no"]
+ wait_timeout:
+ description:
+ - How long the module should wait (in seconds) for desired state before returning. Zero means wait as long as necessary.
+ required: false
+ default: 0
+extends_documentation_fragment:
+ - aws
+'''
+
+EXAMPLES = '''
+# EFS provisioning
+- efs:
+ state: present
+ name: myTestEFS
+ tags:
+ name: myTestNameTag
+ purpose: file-storage
+ targets:
+ - subnet_id: subnet-748c5d03
+ security_groups: [ "sg-1a2b3c4d" ]
+
+# Modifying EFS data
+- efs:
+ state: present
+ name: myTestEFS
+ tags:
+ name: myAnotherTestTag
+ targets:
+ - subnet_id: subnet-7654fdca
+ security_groups: [ "sg-4c5d6f7a" ]
+
+# Deleting EFS
+- efs:
+ state: absent
+ name: myTestEFS
+'''
+
+RETURN = '''
+creation_time:
+ description: timestamp of creation date
+ returned:
+ type: datetime
+ sample: 2015-11-16 07:30:57-05:00
+creation_token:
+ description: EFS creation token
+ returned:
+ type: UUID
+ sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
+file_system_id:
+ description: ID of the file system
+ returned:
+ type: unique ID
+ sample: fs-xxxxxxxx
+life_cycle_state:
+ description: state of the EFS file system
+ returned:
+ type: str
+ sample: creating, available, deleting, deleted
+mount_point:
+ description: url of file system
+ returned:
+ type: str
+ sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
+mount_targets:
+ description: list of mount targets
+ returned:
+ type: list of dicts
+ sample:
+ [
+ {
+ "file_system_id": "fs-a7ad440e",
+ "ip_address": "172.31.17.173",
+ "life_cycle_state": "available",
+ "mount_target_id": "fsmt-d8907871",
+ "network_interface_id": "eni-6e387e26",
+ "owner_id": "740748460359",
+ "security_groups": [
+ "sg-a30b22c6"
+ ],
+ "subnet_id": "subnet-e265c895"
+ },
+ ...
+ ]
+name:
+ description: name of the file system
+ returned:
+ type: str
+ sample: my-efs
+number_of_mount_targets:
+ description: the number of targets mounted
+ returned:
+ type: int
+ sample: 3
+owner_id:
+ description: AWS account ID of EFS owner
+ returned:
+ type: str
+ sample: XXXXXXXXXXXX
+size_in_bytes:
+ description: size of the file system in bytes as of a timestamp
+ returned:
+ type: dict
+ sample:
+ {
+ "timestamp": "2015-12-21 13:59:59-05:00",
+ "value": 12288
+ }
+performance_mode:
+ description: performance mode of the file system
+ returned:
+ type: str
+ sample: "generalPurpose"
+tags:
+ description: tags on the efs instance
+ returned:
+ type: dict
+ sample:
+ {
+ "name": "my-efs",
+ "key": "Value"
+ }
+
+'''
+
+import sys
+from time import sleep
+from time import time as timestamp
+from collections import defaultdict
+
+try:
+ from botocore.exceptions import ClientError
+ import boto3
+ HAS_BOTO3 = True
+except ImportError as e:
+ HAS_BOTO3 = False
+
+
+class EFSConnection(object):
+
+ DEFAULT_WAIT_TIMEOUT_SECONDS = 0
+
+ STATE_CREATING = 'creating'
+ STATE_AVAILABLE = 'available'
+ STATE_DELETING = 'deleting'
+ STATE_DELETED = 'deleted'
+
+ def __init__(self, module, region, **aws_connect_params):
+ try:
+ self.connection = boto3_conn(module, conn_type='client',
+ resource='efs', region=region,
+ **aws_connect_params)
+ except Exception as e:
+ module.fail_json(msg="Failed to connect to AWS: %s" % str(e))
+
+ self.region = region
+ self.wait = module.params.get('wait')
+ self.wait_timeout = module.params.get('wait_timeout')
+
+ def get_file_systems(self, **kwargs):
+ """
+ Returns generator of file systems including all attributes of FS
+ """
+ items = iterate_all(
+ 'FileSystems',
+ self.connection.describe_file_systems,
+ **kwargs
+ )
+ for item in items:
+ item['CreationTime'] = str(item['CreationTime'])
+ """
+ Suffix of network path to be used as NFS device for mount. More detail here:
+ http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
+ """
+ item['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (item['FileSystemId'], self.region)
+ if 'Timestamp' in item['SizeInBytes']:
+ item['SizeInBytes']['Timestamp'] = str(item['SizeInBytes']['Timestamp'])
+ if item['LifeCycleState'] == self.STATE_AVAILABLE:
+ item['Tags'] = self.get_tags(FileSystemId=item['FileSystemId'])
+ item['MountTargets'] = list(self.get_mount_targets(FileSystemId=item['FileSystemId']))
+ else:
+ item['Tags'] = {}
+ item['MountTargets'] = []
+ yield item
+
+ def get_tags(self, **kwargs):
+ """
+ Returns tag list for selected instance of EFS
+ """
+ tags = iterate_all(
+ 'Tags',
+ self.connection.describe_tags,
+ **kwargs
+ )
+ return dict((tag['Key'], tag['Value']) for tag in tags)
+
+ def get_mount_targets(self, **kwargs):
+ """
+ Returns mount targets for selected instance of EFS
+ """
+ targets = iterate_all(
+ 'MountTargets',
+ self.connection.describe_mount_targets,
+ **kwargs
+ )
+ for target in targets:
+ if target['LifeCycleState'] == self.STATE_AVAILABLE:
+ target['SecurityGroups'] = list(self.get_security_groups(
+ MountTargetId=target['MountTargetId']
+ ))
+ else:
+ target['SecurityGroups'] = []
+ yield target
+
+ def get_security_groups(self, **kwargs):
+ """
+ Returns security groups for selected instance of EFS
+ """
+ return iterate_all(
+ 'SecurityGroups',
+ self.connection.describe_mount_target_security_groups,
+ **kwargs
+ )
+
+ def get_file_system_id(self, name):
+ """
+ Returns ID of instance by instance name
+ """
+ info = first_or_default(iterate_all(
+ 'FileSystems',
+ self.connection.describe_file_systems,
+ CreationToken=name
+ ))
+ return info and info['FileSystemId'] or None
+
+ def get_file_system_state(self, name, file_system_id=None):
+ """
+ Returns state of filesystem by EFS id/name
+ """
+ info = first_or_default(iterate_all(
+ 'FileSystems',
+ self.connection.describe_file_systems,
+ CreationToken=name,
+ FileSystemId=file_system_id
+ ))
+ return info and info['LifeCycleState'] or self.STATE_DELETED
+
+ def get_mount_targets_in_state(self, file_system_id, states=None):
+ """
+ Returns states of mount targets of selected EFS with selected state(s) (optional)
+ """
+ targets = iterate_all(
+ 'MountTargets',
+ self.connection.describe_mount_targets,
+ FileSystemId=file_system_id
+ )
+
+ if states:
+ if not isinstance(states, list):
+ states = [states]
+ targets = filter(lambda target: target['LifeCycleState'] in states, targets)
+
+ return list(targets)
+
+ def create_file_system(self, name, performance_mode):
+ """
+ Creates new filesystem with selected name
+ """
+ changed = False
+ state = self.get_file_system_state(name)
+ if state in [self.STATE_DELETING, self.STATE_DELETED]:
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_DELETED
+ )
+ self.connection.create_file_system(CreationToken=name, PerformanceMode=performance_mode)
+ changed = True
+
+ # we always wait for the state to be available when creating.
+ # if we try to take any actions on the file system before it's available
+ # we'll throw errors
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_AVAILABLE,
+ self.wait_timeout
+ )
+
+ return changed
+
+ def converge_file_system(self, name, tags, targets):
+ """
+ Change attributes (mount targets and tags) of filesystem by name
+ """
+ result = False
+ fs_id = self.get_file_system_id(name)
+
+ if tags is not None:
+ tags_to_create, _, tags_to_delete = dict_diff(self.get_tags(FileSystemId=fs_id), tags)
+
+ if tags_to_delete:
+ self.connection.delete_tags(
+ FileSystemId=fs_id,
+ TagKeys=[item[0] for item in tags_to_delete]
+ )
+ result = True
+
+ if tags_to_create:
+ self.connection.create_tags(
+ FileSystemId=fs_id,
+ Tags=[{'Key': item[0], 'Value': item[1]} for item in tags_to_create]
+ )
+ result = True
+
+ if targets is not None:
+ incomplete_states = [self.STATE_CREATING, self.STATE_DELETING]
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
+ 0
+ )
+
+ index_by_subnet_id = lambda items: dict((item['SubnetId'], item) for item in items)
+
+ current_targets = index_by_subnet_id(self.get_mount_targets(FileSystemId=fs_id))
+ targets = index_by_subnet_id(targets)
+
+ targets_to_create, intersection, targets_to_delete = dict_diff(current_targets,
+ targets, True)
+
+ """ To modify mount target it should be deleted and created again """
+ changed = list(filter(
+ lambda sid: not targets_equal(['SubnetId', 'IpAddress', 'NetworkInterfaceId'],
+ current_targets[sid], targets[sid]), intersection))
+ targets_to_delete = list(targets_to_delete) + changed
+ targets_to_create = list(targets_to_create) + changed
+
+ if targets_to_delete:
+ for sid in targets_to_delete:
+ self.connection.delete_mount_target(
+ MountTargetId=current_targets[sid]['MountTargetId']
+ )
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
+ 0
+ )
+ result = True
+
+ if targets_to_create:
+ for sid in targets_to_create:
+ self.connection.create_mount_target(
+ FileSystemId=fs_id,
+ **targets[sid]
+ )
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(fs_id, incomplete_states)),
+ 0,
+ self.wait_timeout
+ )
+ result = True
+
+ security_groups_to_update = list(filter(
+ lambda sid: 'SecurityGroups' in targets[sid] and
+ current_targets[sid]['SecurityGroups'] != targets[sid]['SecurityGroups'],
+ intersection
+ ))
+
+ if security_groups_to_update:
+ for sid in security_groups_to_update:
+ self.connection.modify_mount_target_security_groups(
+ MountTargetId=current_targets[sid]['MountTargetId'],
+ SecurityGroups=targets[sid]['SecurityGroups']
+ )
+ result = True
+
+ return result
+
+ def delete_file_system(self, name, file_system_id=None):
+ """
+ Removes EFS instance by id/name
+ """
+ result = False
+ state = self.get_file_system_state(name, file_system_id)
+ if state in [self.STATE_CREATING, self.STATE_AVAILABLE]:
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_AVAILABLE
+ )
+ if not file_system_id:
+ file_system_id = self.get_file_system_id(name)
+ self.delete_mount_targets(file_system_id)
+ self.connection.delete_file_system(FileSystemId=file_system_id)
+ result = True
+
+ if self.wait:
+ wait_for(
+ lambda: self.get_file_system_state(name),
+ self.STATE_DELETED,
+ self.wait_timeout
+ )
+
+ return result
+
+ def delete_mount_targets(self, file_system_id):
+ """
+ Removes mount targets by EFS id
+ """
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_CREATING)),
+ 0
+ )
+
+ targets = self.get_mount_targets_in_state(file_system_id, self.STATE_AVAILABLE)
+ for target in targets:
+ self.connection.delete_mount_target(MountTargetId=target['MountTargetId'])
+
+ wait_for(
+ lambda: len(self.get_mount_targets_in_state(file_system_id, self.STATE_DELETING)),
+ 0
+ )
+
+ return len(targets) > 0
+
+
+def iterate_all(attr, map_method, **kwargs):
+ """
+ Method creates iterator from boto result set
+ """
+ args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
+ wait = 1
+ while True:
+ try:
+ data = map_method(**args)
+ for elm in data[attr]:
+ yield elm
+ if 'NextMarker' in data:
+ args['Marker'] = data['NextMarker']
+ continue
+ break
+ except ClientError as e:
+ if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
+ sleep(wait)
+ wait = wait * 2
+ continue
+
+def targets_equal(keys, a, b):
+ """
+ Method compare two mount targets by specified attributes
+ """
+ for key in keys:
+ if key in b and a[key] != b[key]:
+ return False
+
+ return True
+
+
+def dict_diff(dict1, dict2, by_key=False):
+ """
+ Helper method to calculate difference of two dictionaries
+ """
+ keys1 = set(dict1.keys() if by_key else dict1.items())
+ keys2 = set(dict2.keys() if by_key else dict2.items())
+
+ intersection = keys1 & keys2
+
+ return keys2 ^ intersection, intersection, keys1 ^ intersection
+
+
+def first_or_default(items, default=None):
+ """
+ Helper method to fetch first element of list (if exists)
+ """
+ for item in items:
+ return item
+ return default
+
+
+def wait_for(callback, value, timeout=EFSConnection.DEFAULT_WAIT_TIMEOUT_SECONDS):
+ """
+ Helper method to wait for desired value returned by callback method
+ """
+ wait_start = timestamp()
+ while True:
+ if callback() != value:
+ if timeout != 0 and (timestamp() - wait_start > timeout):
+ raise RuntimeError('Wait timeout exceeded (' + str(timeout) + ' sec)')
+ else:
+ sleep(5)
+ continue
+ break
+
+
+def main():
+ """
+ Module action handler
+ """
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ state=dict(required=False, type='str', choices=["present", "absent"], default="present"),
+ id=dict(required=False, type='str', default=None),
+ name=dict(required=False, type='str', default=None),
+ tags=dict(required=False, type="dict", default={}),
+ targets=dict(required=False, type="list", default=[]),
+ performance_mode=dict(required=False, type='str', choices=["general_purpose", "max_io"], default="general_purpose"),
+ wait=dict(required=False, type="bool", default=False),
+ wait_timeout=dict(required=False, type="int", default=0)
+ ))
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ if not HAS_BOTO3:
+ module.fail_json(msg='boto3 required for this module')
+
+ region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
+ connection = EFSConnection(module, region, **aws_connect_params)
+
+ name = module.params.get('name')
+ fs_id = module.params.get('id')
+ tags = module.params.get('tags')
+ target_translations = {
+ 'ip_address': 'IpAddress',
+ 'security_groups': 'SecurityGroups',
+ 'subnet_id': 'SubnetId'
+ }
+ targets = [dict((target_translations[key], value) for (key, value) in x.items()) for x in module.params.get('targets')]
+ performance_mode_translations = {
+ 'general_purpose': 'generalPurpose',
+ 'max_io': 'maxIO'
+ }
+ performance_mode = performance_mode_translations[module.params.get('performance_mode')]
+ changed = False
+
+ state = str(module.params.get('state')).lower()
+
+ if state == 'present':
+ if not name:
+ module.fail_json(msg='Name parameter is required for create')
+
+ changed = connection.create_file_system(name, performance_mode)
+ changed = connection.converge_file_system(name=name, tags=tags, targets=targets) or changed
+ result = first_or_default(connection.get_file_systems(CreationToken=name))
+
+ elif state == 'absent':
+ if not name and not fs_id:
+ module.fail_json(msg='Either name or id parameter is required for delete')
+
+ changed = connection.delete_file_system(name, fs_id)
+ result = None
+ if result:
+ result = camel_dict_to_snake_dict(result)
+ module.exit_json(changed=changed, efs=result)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/efs_facts.py b/lib/ansible/modules/cloud/amazon/efs_facts.py
new file mode 100644
index 0000000000..aa7adf8bee
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/efs_facts.py
@@ -0,0 +1,379 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: efs_facts
+short_description: Get information about Amazon EFS file systems
+description:
+ - Module searches Amazon EFS file systems
+version_added: "2.2"
+requirements: [ boto3 ]
+author:
+ - "Ryan Sydnor (@ryansydnor)"
+options:
+ name:
+ description:
+ - Creation Token of Amazon EFS file system.
+ required: false
+ default: None
+ id:
+ description:
+ - ID of Amazon EFS.
+ required: false
+ default: None
+ tags:
+ description:
+ - List of tags of Amazon EFS. Should be defined as dictionary
+ required: false
+ default: None
+ targets:
+ description:
+ - "List of mounted targets. It should be a list of dictionaries, every dictionary should include next attributes:
+ - SubnetId - Mandatory. The ID of the subnet to add the mount target in.
+ - IpAddress - Optional. A valid IPv4 address within the address range of the specified subnet.
+ - SecurityGroups - Optional. List of security group IDs, of the form 'sg-xxxxxxxx'. These must be for the same VPC as subnet specified."
+ required: false
+ default: None
+extends_documentation_fragment:
+ - aws
+'''
+
+EXAMPLES = '''
+# find all existing efs
+- efs_facts:
+ register: result
+
+- efs_facts:
+ name: myTestNameTag
+
+- efs_facts:
+ id: fs-1234abcd
+
+# Searching all EFS instances with tag Name = 'myTestNameTag', in subnet 'subnet-1a2b3c4d' and with security group 'sg-4d3c2b1a'
+- efs_facts:
+ tags:
+ name: myTestNameTag
+ targets:
+ - subnet-1a2b3c4d
+ - sg-4d3c2b1a
+'''
+
+RETURN = '''
+creation_time:
+ description: timestamp of creation date
+ returned:
+ type: datetime
+ sample: 2015-11-16 07:30:57-05:00
+creation_token:
+ description: EFS creation token
+ returned:
+ type: UUID
+ sample: console-88609e04-9a0e-4a2e-912c-feaa99509961
+file_system_id:
+ description: ID of the file system
+ returned:
+ type: unique ID
+ sample: fs-xxxxxxxx
+life_cycle_state:
+ description: state of the EFS file system
+ returned:
+ type: str
+ sample: creating, available, deleting, deleted
+mount_point:
+ description: url of file system
+ returned:
+ type: str
+ sample: .fs-xxxxxxxx.efs.us-west-2.amazonaws.com:/
+mount_targets:
+ description: list of mount targets
+ returned:
+ type: list of dicts
+ sample:
+ [
+ {
+ "file_system_id": "fs-a7ad440e",
+ "ip_address": "172.31.17.173",
+ "life_cycle_state": "available",
+ "mount_target_id": "fsmt-d8907871",
+ "network_interface_id": "eni-6e387e26",
+ "owner_id": "740748460359",
+ "security_groups": [
+ "sg-a30b22c6"
+ ],
+ "subnet_id": "subnet-e265c895"
+ },
+ ...
+ ]
+name:
+ description: name of the file system
+ returned:
+ type: str
+ sample: my-efs
+number_of_mount_targets:
+ description: the number of targets mounted
+ returned:
+ type: int
+ sample: 3
+owner_id:
+ description: AWS account ID of EFS owner
+ returned:
+ type: str
+ sample: XXXXXXXXXXXX
+size_in_bytes:
+ description: size of the file system in bytes as of a timestamp
+ returned:
+ type: dict
+ sample:
+ {
+ "timestamp": "2015-12-21 13:59:59-05:00",
+ "value": 12288
+ }
+performance_mode:
+ description: performance mode of the file system
+ returned:
+ type: str
+ sample: "generalPurpose"
+tags:
+ description: tags on the efs instance
+ returned:
+ type: dict
+ sample:
+ {
+ "name": "my-efs",
+ "key": "Value"
+ }
+
+'''
+
+
+from time import sleep
+from collections import defaultdict
+
+try:
+ from botocore.exceptions import ClientError
+ import boto3
+ HAS_BOTO3 = True
+except ImportError as e:
+ HAS_BOTO3 = False
+
class EFSConnection(object):
    """Wrapper around a boto3 EFS client that decorates API results.

    Collects file systems together with their tags, mount targets and
    security groups, and derives the NFS mount point for each file system.
    """
    STATE_CREATING = 'creating'
    STATE_AVAILABLE = 'available'
    STATE_DELETING = 'deleting'
    STATE_DELETED = 'deleted'

    def __init__(self, module, region, **aws_connect_params):
        try:
            self.connection = boto3_conn(module, conn_type='client',
                                         resource='efs', region=region,
                                         **aws_connect_params)
        except Exception as e:
            module.fail_json(msg="Failed to connect to AWS: %s" % str(e))

        self.region = region

    def get_file_systems(self, **kwargs):
        """
        Yield file systems (with all attributes attached) matching the given filters.
        """
        for fs in iterate_all('FileSystems',
                              self.connection.describe_file_systems,
                              **kwargs):
            fs['CreationTime'] = str(fs['CreationTime'])
            # Suffix of the network path used as the NFS device for mounting:
            # http://docs.aws.amazon.com/efs/latest/ug/gs-step-three-connect-to-ec2-instance.html
            fs['MountPoint'] = '.%s.efs.%s.amazonaws.com:/' % (fs['FileSystemId'], self.region)
            if 'Timestamp' in fs['SizeInBytes']:
                fs['SizeInBytes']['Timestamp'] = str(fs['SizeInBytes']['Timestamp'])
            # tags and mount targets are only queried for an available FS
            if fs['LifeCycleState'] == self.STATE_AVAILABLE:
                fs['Tags'] = self.get_tags(FileSystemId=fs['FileSystemId'])
                fs['MountTargets'] = list(self.get_mount_targets(FileSystemId=fs['FileSystemId']))
            else:
                fs['Tags'] = {}
                fs['MountTargets'] = []
            yield fs

    def get_tags(self, **kwargs):
        """
        Return the tags of the selected EFS instance as a plain dict.
        """
        return dict(
            (tag['Key'], tag['Value'])
            for tag in iterate_all('Tags', self.connection.describe_tags, **kwargs)
        )

    def get_mount_targets(self, **kwargs):
        """
        Yield the mount targets of the selected EFS instance, with security
        groups attached to each available target.
        """
        for target in iterate_all('MountTargets',
                                  self.connection.describe_mount_targets,
                                  **kwargs):
            if target['LifeCycleState'] == self.STATE_AVAILABLE:
                target['SecurityGroups'] = list(self.get_security_groups(
                    MountTargetId=target['MountTargetId']
                ))
            else:
                target['SecurityGroups'] = []
            yield target

    def get_security_groups(self, **kwargs):
        """
        Return an iterator over the security groups of one mount target.
        """
        return iterate_all(
            'SecurityGroups',
            self.connection.describe_mount_target_security_groups,
            **kwargs
        )
+
+
def iterate_all(attr, map_method, **kwargs):
    """
    Iterate over all results of a paginated boto3 call, retrying on throttling.

    :param attr: key in the API response holding the list of results
    :param map_method: boto3 client method to call
    :param kwargs: arguments for map_method; None values are dropped
    :raises ClientError: on any non-throttling error, or when throttling
        back-off has already reached the 600 second cap
    """
    args = dict((key, value) for (key, value) in kwargs.items() if value is not None)
    wait = 1
    while True:
        try:
            data = map_method(**args)
            for elm in data[attr]:
                yield elm
            if 'NextMarker' in data:
                # fetch the next page (was data['Nextmarker'] — a KeyError
                # on every paginated response)
                args['Marker'] = data['NextMarker']
                continue
            break
        except ClientError as e:
            if e.response['Error']['Code'] == "ThrottlingException" and wait < 600:
                sleep(wait)
                wait = wait * 2
                continue
            # previously any other error fell through and the while-loop
            # retried forever; propagate it instead
            raise
+
+
def prefix_to_attr(attr_id):
    """
    Map a resource ID prefix to the corresponding mount-target attribute name.
    IDs with no recognised prefix are treated as IP addresses.
    """
    prefixes = (
        ('fsmt-', 'MountTargetId'),
        ('subnet-', 'SubnetId'),
        ('eni-', 'NetworkInterfaceId'),
        ('sg-', 'SecurityGroups'),
    )
    for prefix, attr in prefixes:
        if str(attr_id).startswith(prefix):
            return attr
    return 'IpAddress'
+
def first_or_default(items, default=None):
    """Return the first element of *items*, or *default* when it is empty."""
    return next(iter(items), default)
+
def has_tags(available, required):
    """
    Return True when every key/value pair in *required* is present in *available*.
    """
    return all(key in available and available[key] == value
               for key, value in required.items())
+
def has_targets(available, required):
    """
    Return True when every requested mount target is already present.

    *required* is a list of (value, field) pairs; *available* is the list of
    mount-target dicts reported by the API.
    """
    grouped = group_list_of_dict(available)
    return all(field in grouped and value in grouped[field]
               for value, field in required)
+
def group_list_of_dict(array):
    """
    Merge a list of dicts into one dict mapping each key to all of its values.
    List values are flattened into the accumulated list.
    """
    grouped = defaultdict(list)
    for entry in array:
        for key, value in entry.items():
            if isinstance(value, list):
                grouped[key].extend(value)
            else:
                grouped[key].append(value)
    return grouped
+
+
def main():
    """
    Module entry point: read the filter parameters, query EFS and return
    the matching file systems as Ansible facts.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        id=dict(required=False, type='str', default=None),
        name=dict(required=False, type='str', default=None),
        tags=dict(required=False, type="dict", default={}),
        targets=dict(required=False, type="list", default=[])
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, _, aws_connect_params = get_aws_connection_info(module, boto3=True)
    connection = EFSConnection(module, region, **aws_connect_params)

    params = module.params
    file_systems = connection.get_file_systems(FileSystemId=params.get('id'),
                                               CreationToken=params.get('name'))

    # narrow the result set by requested tags, if any
    tags = params.get('tags')
    if tags:
        file_systems = (fs for fs in file_systems if has_tags(fs['Tags'], tags))

    # narrow further by requested mount targets, if any
    targets = params.get('targets')
    if targets:
        wanted = [(target, prefix_to_attr(target)) for target in targets]
        file_systems = (fs for fs in file_systems
                        if has_targets(fs['MountTargets'], wanted))

    facts = [camel_dict_to_snake_dict(fs) for fs in file_systems]
    module.exit_json(changed=False, ansible_facts={'efs': facts})
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/execute_lambda.py b/lib/ansible/modules/cloud/amazon/execute_lambda.py
new file mode 100644
index 0000000000..676d3c5e30
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/execute_lambda.py
@@ -0,0 +1,287 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: execute_lambda
+short_description: Execute an AWS Lambda function
+description:
+ - This module executes AWS Lambda functions, allowing synchronous and asynchronous
+ invocation.
+version_added: "2.2"
+extends_documentation_fragment:
+ - aws
+author: "Ryan Scott Brown (@ryansb) <ryansb@redhat.com>"
+requirements:
+ - python >= 2.6
+ - boto3
+notes:
+ - Async invocation will always return an empty C(output) key.
+ - Synchronous invocation may result in a function timeout, resulting in an
+ empty C(output) key.
+options:
+ name:
+ description:
+ - The name of the function to be invoked. This can only be used for
+ invocations within the calling account. To invoke a function in another
+ account, use I(function_arn) to specify the full ARN.
+ required: false
+ default: None
+ function_arn:
+ description:
+ - The name of the function to be invoked
+ required: false
+ default: None
+ tail_log:
+ description:
+ - If C(tail_log=true), the result of the task will include the last 4 KB
+ of the CloudWatch log for the function execution. Log tailing only
+ works if you use synchronous invocation C(wait=true). This is usually
+ used for development or testing Lambdas.
+ required: false
+ default: false
+ wait:
+ description:
+ - Whether to wait for the function results or not. If I(wait) is false,
+ the task will not return any results. To wait for the Lambda function
+ to complete, set C(wait=true) and the result will be available in the
+ I(output) key.
+ required: false
+ default: true
+ dry_run:
+ description:
+ - Do not *actually* invoke the function. A C(DryRun) call will check that
+ the caller has permissions to call the function, especially for
+ checking cross-account permissions.
+ required: false
+ default: False
+ version_qualifier:
+ description:
+ - Which version/alias of the function to run. This defaults to the
+ C(LATEST) revision, but can be set to any existing version or alias.
+      See https://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html
+ for details.
+ required: false
+ default: LATEST
+ payload:
+ description:
+ - A dictionary in any form to be provided as input to the Lambda function.
+ required: false
+ default: {}
+'''
+
+EXAMPLES = '''
+- execute_lambda:
+ name: test-function
+ # the payload is automatically serialized and sent to the function
+ payload:
+ foo: bar
+ value: 8
+ register: response
+
+# Test that you have sufficient permissions to execute a Lambda function in
+# another account
+- execute_lambda:
+ function_arn: arn:aws:lambda:us-east-1:123456789012:function/some-function
+ dry_run: true
+
+- execute_lambda:
+ name: test-function
+ payload:
+ foo: bar
+ value: 8
+ wait: true
+ tail_log: true
+ register: response
+ # the response will have a `logs` key that will contain a log (up to 4KB) of the function execution in Lambda.
+
+- execute_lambda:
+ name: test-function
+ version_qualifier: PRODUCTION
+'''
+
+RETURN = '''
+output:
+ description: Function output if wait=true and the function returns a value
+ returned: success
+ type: dict
+ sample: "{ 'output': 'something' }"
+logs:
+ description: The last 4KB of the function logs. Only provided if I(tail_log) is true
+ type: string
+status:
+ description: C(StatusCode) of API call exit (200 for synchronous invokes, 202 for async)
+ type: int
+ sample: 200
+'''
+
+import base64
+import json
+import traceback
+
+try:
+    import botocore
+    import boto3
+    HAS_BOTO3 = True
+except ImportError:
+    HAS_BOTO3 = False
+
+
def main():
    """
    Invoke an AWS Lambda function, optionally waiting for its result and
    tailing its CloudWatch log output.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(),
        function_arn=dict(),
        # type='bool' already restricts the values; the deprecated
        # choices=BOOLEANS is redundant
        wait=dict(default=True, type='bool'),
        tail_log=dict(default=False, type='bool'),
        dry_run=dict(default=False, type='bool'),
        version_qualifier=dict(),
        payload=dict(default={}, type='dict'),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['name', 'function_arn'],
        ]
    )

    # single boto3 availability check (this was previously duplicated)
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    name = module.params.get('name')
    function_arn = module.params.get('function_arn')
    await_return = module.params.get('wait')
    dry_run = module.params.get('dry_run')
    tail_log = module.params.get('tail_log')
    version_qualifier = module.params.get('version_qualifier')
    payload = module.params.get('payload')

    if not (name or function_arn):
        module.fail_json(msg="Must provide either a function_arn or a name to invoke.")

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=HAS_BOTO3)
    if not region:
        module.fail_json(msg="The AWS region must be specified as an "
                             "environment variable or in the AWS credentials "
                             "profile.")

    try:
        client = boto3_conn(module, conn_type='client', resource='lambda',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
        # traceback.format_exc() takes no exception argument; passing one is
        # a TypeError on Python 3
        module.fail_json(msg="Failure connecting boto3 to AWS", exception=traceback.format_exc())

    invoke_params = {}

    if await_return:
        # await response
        invoke_params['InvocationType'] = 'RequestResponse'
    else:
        # fire and forget
        invoke_params['InvocationType'] = 'Event'
    if dry_run or module.check_mode:
        # dry_run overrides invocation type
        invoke_params['InvocationType'] = 'DryRun'

    if tail_log and await_return:
        invoke_params['LogType'] = 'Tail'
    elif tail_log and not await_return:
        module.fail_json(msg="The `tail_log` parameter is only available if "
                             "the invocation waits for the function to complete. "
                             "Set `wait` to true or turn off `tail_log`.")
    else:
        invoke_params['LogType'] = 'None'

    if version_qualifier:
        invoke_params['Qualifier'] = version_qualifier

    if payload:
        invoke_params['Payload'] = json.dumps(payload)

    if function_arn:
        invoke_params['FunctionName'] = function_arn
    elif name:
        invoke_params['FunctionName'] = name

    try:
        response = client.invoke(**invoke_params)
    except botocore.exceptions.ClientError as ce:
        if ce.response['Error']['Code'] == 'ResourceNotFoundException':
            module.fail_json(msg="Could not find Lambda to execute. Make sure "
                                 "the ARN is correct and your profile has "
                                 "permissions to execute this function.",
                             exception=traceback.format_exc())
        # fail_json only accepts keyword arguments; msg was previously
        # passed positionally, which raises a TypeError
        module.fail_json(msg="Client-side error when invoking Lambda, check inputs and specific error",
                         exception=traceback.format_exc())
    except botocore.exceptions.ParamValidationError:
        module.fail_json(msg="Parameters to `invoke` failed to validate",
                         exception=traceback.format_exc())
    except Exception:
        module.fail_json(msg="Unexpected failure while invoking Lambda function",
                         exception=traceback.format_exc())

    results = {
        'logs': '',
        'status': response['StatusCode'],
        'output': '',
    }

    if response.get('LogResult'):
        try:
            # logs are base64 encoded in the API response
            results['logs'] = base64.b64decode(response.get('LogResult', ''))
        except Exception:
            module.fail_json(msg="Failed while decoding logs", exception=traceback.format_exc())

    if invoke_params['InvocationType'] == 'RequestResponse':
        try:
            results['output'] = json.loads(response['Payload'].read())
        except Exception:
            module.fail_json(msg="Failed while decoding function return value", exception=traceback.format_exc())

    if isinstance(results.get('output'), dict) and any(
            [results['output'].get('stackTrace'), results['output'].get('errorMessage')]):
        # AWS sends back stack traces and error messages when a function failed
        # in a RequestResponse (synchronous) context.
        template = ("Function executed, but there was an error in the Lambda function. "
                    "Message: {errmsg}, Type: {type}, Stack Trace: {trace}")
        error_data = {
            # format the stacktrace sent back as an array into a multiline string
            'trace': '\n'.join(
                [' '.join([
                    str(x) for x in line  # cast line numbers to strings
                ]) for line in results.get('output', {}).get('stackTrace', [])]
            ),
            'errmsg': results['output'].get('errorMessage'),
            'type': results['output'].get('errorType')
        }
        module.fail_json(msg=template.format(**error_data), result=results)

    module.exit_json(changed=True, result=results)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/iam_mfa_device_facts.py b/lib/ansible/modules/cloud/amazon/iam_mfa_device_facts.py
new file mode 100644
index 0000000000..539867663c
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/iam_mfa_device_facts.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: iam_mfa_device_facts
+short_description: List the MFA (Multi-Factor Authentication) devices registered for a user
+description:
+ - List the MFA (Multi-Factor Authentication) devices registered for a user
+version_added: "2.2"
+author: Victor Costan (@pwnall)
+options:
+ user_name:
+ description:
+ - The name of the user whose MFA devices will be listed
+ required: false
+ default: null
+extends_documentation_fragment:
+ - aws
+ - ec2
+requirements:
+ - boto3
+ - botocore
+'''
+
+RETURN = """
+mfa_devices:
+ description: The MFA devices registered for the given user
+ returned: always
+ type: list
+ sample:
+ - enable_date: "2016-03-11T23:25:36+00:00"
+ serial_number: arn:aws:iam::085120003701:mfa/pwnall
+ user_name: pwnall
+ - enable_date: "2016-03-11T23:25:37+00:00"
+ serial_number: arn:aws:iam::085120003702:mfa/pwnall
+ user_name: pwnall
+"""
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# List MFA devices (more details: http://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html)
+iam_mfa_device_facts:
+register: mfa_devices
+
+# Assume an existing role (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
+sts_assume_role:
+ mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}"
+ role_arn: "arn:aws:iam::123456789012:role/someRole"
+ role_session_name: "someRoleSession"
+register: assumed_role
+'''
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
def list_mfa_devices(connection, module):
    """
    List the MFA devices for the requested user (or for the caller when no
    user_name is given) and exit the module with the results.

    :param connection: boto3 IAM client
    :param module: AnsibleModule instance
    """
    user_name = module.params.get('user_name')
    # a read-only listing never changes state
    changed = False

    args = {}
    if user_name is not None:
        args['UserName'] = user_name
    try:
        response = connection.list_mfa_devices(**args)
    except ClientError as e:
        # str(e) works on Python 2 and 3; e.message is Python-2-only and
        # raises AttributeError on Python 3
        module.fail_json(msg=str(e), **camel_dict_to_snake_dict(e.response))

    module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))
+
+
def main():
    """
    Module entry point: connect to IAM and report the user's MFA devices.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        user_name=dict(required=False, default=None)
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        # fail_json raises, so the happy path below only runs with a region
        module.fail_json(msg="region must be specified")
    connection = boto3_conn(module, conn_type='client', resource='iam',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)

    list_mfa_devices(connection, module)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/iam_server_certificate_facts.py b/lib/ansible/modules/cloud/amazon/iam_server_certificate_facts.py
new file mode 100644
index 0000000000..1c8637362f
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/iam_server_certificate_facts.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: iam_server_certificate_facts
+short_description: Retrieve the facts of a server certificate
+description:
+ - Retrieve the attributes of a server certificate
+version_added: "2.2"
+author: "Allen Sanabria (@linuxdynasty)"
+requirements: [boto3, botocore]
+options:
+ name:
+ description:
+ - The name of the server certificate you are retrieving attributes for.
+ required: true
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Retrieve server certificate
+- iam_server_certificate_facts:
+ name: production-cert
+ register: server_cert
+
+# Fail if the server certificate name was not found
+- iam_server_certificate_facts:
+ name: production-cert
+ register: server_cert
+ failed_when: "{{ server_cert.results | length == 0 }}"
+'''
+
+RETURN = '''
+server_certificate_id:
+ description: The 21 character certificate id
+ returned: success
+ type: str
+ sample: "ADWAJXWTZAXIPIMQHMJPO"
+certificate_body:
+ description: The asn1der encoded PEM string
+ returned: success
+ type: str
+ sample: "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----"
+server_certificate_name:
+ description: The name of the server certificate
+ returned: success
+ type: str
+ sample: "server-cert-name"
+arn:
+ description: The Amazon resource name of the server certificate
+ returned: success
+ type: str
+ sample: "arn:aws:iam::911277865346:server-certificate/server-cert-name"
+path:
+ description: The path of the server certificate
+ returned: success
+ type: str
+ sample: "/"
+expiration:
+ description: The date and time this server certificate will expire, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2017-06-15T12:00:00+00:00"
+upload_date:
+ description: The date and time this server certificate was uploaded, in ISO 8601 format.
+ returned: success
+ type: str
+ sample: "2015-04-25T00:36:40+00:00"
+'''
+
+
+try:
+ import boto3
+ import botocore.exceptions
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
def get_server_certs(iam, name=None):
    """Retrieve the attributes of one server certificate, or of all of them.

    Args:
        iam (botocore.client.IAM): The boto3 iam instance.

    Kwargs:
        name (str): The name of the server certificate.

    Basic Usage:
        >>> import boto3
        >>> iam = boto3.client('iam')
        >>> name = "server-cert-name"
        >>> results = get_server_certs(iam, name)
        {
            "upload_date": "2015-04-25T00:36:40+00:00",
            "server_certificate_id": "ADWAJXWTZAXIPIMQHMJPO",
            "certificate_body": "-----BEGIN CERTIFICATE-----\nbunch of random data\n-----END CERTIFICATE-----",
            "server_certificate_name": "server-cert-name",
            "expiration": "2017-06-15T12:00:00+00:00",
            "path": "/",
            "arn": "arn:aws:iam::911277865346:server-certificate/server-cert-name"
        }
    """
    results = dict()
    try:
        if name:
            cert_names = [name]
        else:
            cert_names = [meta['ServerCertificateName']
                          for meta in iam.list_server_certificates()['ServerCertificateMetadataList']]

        for cert_name in cert_names:
            cert = iam.get_server_certificate(ServerCertificateName=cert_name)['ServerCertificate']
            metadata = cert['ServerCertificateMetadata']
            results[metadata['ServerCertificateName']] = {
                'certificate_body': cert['CertificateBody'],
                'server_certificate_id': metadata['ServerCertificateId'],
                'server_certificate_name': metadata['ServerCertificateName'],
                'arn': metadata['Arn'],
                'path': metadata['Path'],
                'expiration': metadata['Expiration'].isoformat(),
                'upload_date': metadata['UploadDate'].isoformat(),
            }

    except botocore.exceptions.ClientError:
        # deliberate best-effort: a lookup/permission failure yields
        # whatever was collected so far (possibly an empty dict)
        pass

    return results
+
def main():
    """
    Module entry point: connect to IAM and return server certificate facts.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str'),
    ))

    module = AnsibleModule(argument_spec=argument_spec,)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        client = boto3_conn(module, conn_type='client', resource='iam',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except botocore.exceptions.ClientError as e:
        module.fail_json(msg="Boto3 Client Error - " + str(e.msg))

    module.exit_json(results=get_server_certs(client, module.params.get('name')))
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/kinesis_stream.py b/lib/ansible/modules/cloud/amazon/kinesis_stream.py
new file mode 100644
index 0000000000..b4e0f7205b
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/kinesis_stream.py
@@ -0,0 +1,1102 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: kinesis_stream
+short_description: Manage a Kinesis Stream.
+description:
+ - Create or Delete a Kinesis Stream.
+ - Update the retention period of a Kinesis Stream.
+ - Update Tags on a Kinesis Stream.
+version_added: "2.2"
+author: Allen Sanabria (@linuxdynasty)
+options:
+ name:
+ description:
+ - "The name of the Kinesis Stream you are managing."
+ default: None
+ required: true
+ shards:
+ description:
+ - "The number of shards you want to have with this stream. This can not
+ be modified after being created."
+ - "This is required when state == present"
+ required: false
+ default: None
+ retention_period:
+ description:
+ - "The default retention period is 24 hours and can not be less than 24
+ hours."
+ - "The retention period can be modified during any point in time."
+ required: false
+ default: None
+ state:
+ description:
+ - "Create or Delete the Kinesis Stream."
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ wait:
+ description:
+ - Wait for operation to complete before returning.
+ required: false
+ default: true
+ wait_timeout:
+ description:
+ - How many seconds to wait for an operation to complete before timing out.
+ required: false
+ default: 300
+ tags:
+ description:
+ - "A dictionary of resource tags of the form: { tag1: value1, tag2: value2 }."
+ required: false
+ default: null
+ aliases: [ "resource_tags" ]
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Basic creation example:
+- name: Set up Kinesis Stream with 10 shards and wait for the stream to become ACTIVE
+ kinesis_stream:
+ name: test-stream
+ shards: 10
+ wait: yes
+ wait_timeout: 600
+ register: test_stream
+
+# Basic creation example with tags:
+- name: Set up Kinesis Stream with 10 shards, tag the environment, and wait for the stream to become ACTIVE
+ kinesis_stream:
+ name: test-stream
+ shards: 10
+ tags:
+ Env: development
+ wait: yes
+ wait_timeout: 600
+ register: test_stream
+
+# Basic creation example with tags and increase the retention period from the default 24 hours to 48 hours:
+- name: Set up Kinesis Stream with 10 shards, tag the environment, increase the retention period and wait for the stream to become ACTIVE
+ kinesis_stream:
+ name: test-stream
+ retention_period: 48
+ shards: 10
+ tags:
+ Env: development
+ wait: yes
+ wait_timeout: 600
+ register: test_stream
+
+# Basic delete example:
+- name: Delete Kinesis Stream test-stream and wait for it to finish deleting.
+ kinesis_stream:
+ name: test-stream
+ state: absent
+ wait: yes
+ wait_timeout: 600
+ register: test_stream
+'''
+
+RETURN = '''
+stream_name:
+ description: The name of the Kinesis Stream.
+ returned: when state == present.
+ type: string
+ sample: "test-stream"
+stream_arn:
+ description: The amazon resource identifier
+ returned: when state == present.
+ type: string
+ sample: "arn:aws:kinesis:east-side:123456789:stream/test-stream"
+stream_status:
+ description: The current state of the Kinesis Stream.
+ returned: when state == present.
+ type: string
+ sample: "ACTIVE"
+retention_period_hours:
+ description: Number of hours messages will be kept for a Kinesis Stream.
+ returned: when state == present.
+ type: int
+ sample: 24
+tags:
+ description: Dictionary containing all the tags associated with the Kinesis stream.
+ returned: when state == present.
+ type: dict
+ sample: {
+ "Name": "Splunk",
+ "Env": "development"
+ }
+'''
+
+try:
+ import botocore
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+import re
+import datetime
+import time
+from functools import reduce
+
+
def convert_to_lower(data):
    """Recursively convert CamelCase dict keys to snake_case.

    Args:
        data (dict): Dictionary whose keys may contain upper-case runs.
            Example: 'FooBar' becomes 'foo_bar', 'StreamARN' becomes
            'stream_arn'. Values of type datetime.datetime are converted
            to their ISO 8601 string representation.

    Basic Usage:
        >>> convert_to_lower({'FooBar': []})
        {'foo_bar': []}

    Returns:
        Dictionary with snake_case keys. Non-dict input is returned
        unchanged, which makes the recursion over lists of scalars safe.
    """
    results = dict()
    if isinstance(data, dict):
        for key, val in data.items():
            # Prefix every run of up to three capitals with '_' and lower-case,
            # then strip the artificial leading underscore.
            key = re.sub(r'(([A-Z]{1,3}){1})', r'_\1', key).lower()
            if key[0] == '_':
                key = key[1:]
            if isinstance(val, datetime.datetime):
                results[key] = val.isoformat()
            elif isinstance(val, dict):
                results[key] = convert_to_lower(val)
            elif isinstance(val, list):
                # Only dicts need key conversion. Previously every item was
                # recursed unconditionally, which silently replaced non-dict
                # items (e.g. a list of shard-id strings) with empty dicts.
                results[key] = [
                    convert_to_lower(item) if isinstance(item, dict) else item
                    for item in val
                ]
            else:
                results[key] = val
        return results
    # Non-dict input: nothing to convert.
    return data
+
+
def make_tags_in_proper_format(tags):
    """Flatten AWS-style tag entries into a plain key/value mapping.

    Args:
        tags (list): Tags in the AWS list-of-dicts format, e.g.
            [{'Key': 'env', 'Value': 'development'}]

    Basic Usage:
        >>> make_tags_in_proper_format([{'Key': 'env', 'Value': 'development'}])
        {'env': 'development'}

    Returns:
        Dict mapping each tag's 'Key' to its 'Value'.
    """
    return dict((entry.get('Key'), entry.get('Value')) for entry in tags)
+
+
def make_tags_in_aws_format(tags):
    """Expand a plain key/value mapping into the AWS tag list format.

    Args:
        tags (dict): Tags as a simple mapping, e.g.
            {'env': 'development', 'service': 'web'}

    Basic Usage:
        >>> make_tags_in_aws_format({'env': 'development'})
        [{'Key': 'env', 'Value': 'development'}]

    Returns:
        List of {'Key': ..., 'Value': ...} dicts, one per mapping entry.
    """
    return [{'Key': key, 'Value': value} for key, value in tags.items()]
+
+
def get_tags(client, stream_name, check_mode=False):
    """Retrieve the tags attached to a Kinesis Stream.

    Args:
        client (botocore.client.EC2): Boto3 client.
        stream_name (str): Name of the Kinesis stream.

    Kwargs:
        check_mode (bool): When True, skip the API call and return a
            fabricated DryRunMode tag. default=False

    Basic Usage:
        >>> client = boto3.client('kinesis')
        >>> get_tags(client, 'test-stream')

    Returns:
        Tuple (bool, str, dict)
    """
    success = False
    err_msg = ''
    results = dict()
    try:
        if check_mode:
            # Dry run: fabricate a marker tag instead of hitting the API.
            results = [{'Key': 'DryRunMode', 'Value': 'true'}]
        else:
            results = client.list_tags_for_stream(StreamName=stream_name)['Tags']
        success = True
    except botocore.exceptions.ClientError as e:
        err_msg = str(e)

    return success, err_msg, results
+
+
def find_stream(client, stream_name, check_mode=False):
    """Describe a Kinesis Stream, collecting all shard pages.

    Args:
        client (botocore.client.EC2): Boto3 client.
        stream_name (str): Name of the Kinesis stream.

    Kwargs:
        check_mode (bool): When True, skip the API call and return a
            fabricated ACTIVE stream description. default=False

    Basic Usage:
        >>> client = boto3.client('kinesis')
        >>> find_stream(client, 'test-stream')

    Returns:
        Tuple (bool, str, dict) — the dict is the StreamDescription with
        'Shards' (all pages merged) and 'ShardsCount' added.
    """
    err_msg = ''
    success = False
    params = {
        'StreamName': stream_name,
    }
    results = dict()
    has_more_shards = True
    shards = list()
    try:
        if not check_mode:
            while has_more_shards:
                results = (
                    client.describe_stream(**params)['StreamDescription']
                )
                shards.extend(results.pop('Shards'))
                has_more_shards = results['HasMoreShards']
                if has_more_shards:
                    # Advance the page cursor. Without ExclusiveStartShardId
                    # the API keeps returning the first page, so the loop
                    # never terminated on streams with more shards than one
                    # describe_stream page holds.
                    params['ExclusiveStartShardId'] = shards[-1]['ShardId']
            results['Shards'] = shards
            results['ShardsCount'] = len(shards)
        else:
            results = {
                'HasMoreShards': True,
                'RetentionPeriodHours': 24,
                'StreamName': stream_name,
                'StreamARN': 'arn:aws:kinesis:east-side:123456789:stream/{0}'.format(stream_name),
                'StreamStatus': 'ACTIVE'
            }
        success = True
    except botocore.exceptions.ClientError as e:
        err_msg = str(e)

    return success, err_msg, results
+
+
def wait_for_status(client, stream_name, status, wait_timeout=300,
                    check_mode=False):
    """Poll a Kinesis Stream until it reaches the requested status.

    Args:
        client (botocore.client.EC2): Boto3 client.
        stream_name (str): The name of the kinesis stream.
        status (str): The StreamStatus to wait for, e.g. 'ACTIVE'.
            'DELETING' is special-cased: success is the stream vanishing.

    Kwargs:
        wait_timeout (int): Number of seconds to wait before giving up.
            default=300
        check_mode (bool): Dry run — succeed immediately without polling.
            default=False

    Basic Usage:
        >>> client = boto3.client('kinesis')
        >>> wait_for_status(client, 'test-stream', 'ACTIVE', 300)

    Returns:
        Tuple (bool, str, dict)
    """
    polling_increment_secs = 5
    wait_timeout = time.time() + wait_timeout
    status_achieved = False
    stream = dict()
    err_msg = ""

    while wait_timeout > time.time():
        try:
            find_success, find_msg, stream = (
                find_stream(client, stream_name, check_mode=check_mode)
            )
            if check_mode:
                status_achieved = True
                break

            elif status != 'DELETING':
                if find_success and stream:
                    if stream.get('StreamStatus') == status:
                        status_achieved = True
                        break

            else:
                # Waiting for deletion: the stream is gone once describe fails.
                if not find_success:
                    status_achieved = True
                    break

            # Status not reached yet: always back off before re-polling.
            # Previously only one branch slept, so the other "not ready"
            # branches busy-looped and hammered the API.
            time.sleep(polling_increment_secs)
        except botocore.exceptions.ClientError as e:
            err_msg = str(e)

    if not status_achieved:
        err_msg = "Wait time out reached, while waiting for results"
    else:
        err_msg = "Status {0} achieved successfully".format(status)

    return status_achieved, err_msg, stream
+
+
def tags_action(client, stream_name, tags, action='create', check_mode=False):
    """Add or remove tags on a Kinesis Stream.

    Args:
        client (botocore.client.EC2): Boto3 client.
        stream_name (str): The name of the kinesis stream.
        tags (dict): Tags to add ({key: value}); for 'delete' only the
            keys are used.

    Kwargs:
        action (str): The action to perform.
            valid actions == create and delete
            default=create
        check_mode (bool): Dry run — report success without calling the API.
            default=False

    Basic Usage:
        >>> client = boto3.client('kinesis')
        >>> tags_action(client, 'test-stream', {'env': 'development'})
        [True, '']

    Returns:
        Tuple (bool, str)
    """
    success = False
    err_msg = ""
    params = {'StreamName': stream_name}
    try:
        if not check_mode:
            if action == 'create':
                params['Tags'] = tags
                client.add_tags_to_stream(**params)
                success = True
            elif action == 'delete':
                # list() is required: on Python 3, dict.keys() is a view
                # object, which boto3 cannot serialize as TagKeys.
                params['TagKeys'] = list(tags.keys())
                client.remove_tags_from_stream(**params)
                success = True
            else:
                err_msg = 'Invalid action {0}'.format(action)
        else:
            if action == 'create':
                success = True
            elif action == 'delete':
                success = True
            else:
                err_msg = 'Invalid action {0}'.format(action)

    except botocore.exceptions.ClientError as e:
        err_msg = str(e)

    return success, err_msg
+
+
def recreate_tags_from_list(list_of_tags):
    """Rebuild AWS-format tags from (key, value) tuples.

    Args:
        list_of_tags (list): List of (key, value) tuples, e.g. the result
            of set operations over dict items.

    Basic Usage:
        >>> recreate_tags_from_list([('Env', 'Development')])
        [{'Key': 'Env', 'Value': 'Development'}]

    Returns:
        List of {'Key': ..., 'Value': ...} dicts.
    """
    return [
        {'Key': key_name, 'Value': key_val}
        for key_name, key_val in list_of_tags
    ]
+
+
def update_tags(client, stream_name, tags, check_mode=False):
    """Reconcile the tags on a Kinesis Stream with the desired mapping.

    Compares the stream's current tags against *tags*, deletes tags that
    are present but not desired (or whose value changed), and (re)creates
    the desired ones.

    Args:
        client (botocore.client.EC2): Boto3 client.
        stream_name (str): The name of the kinesis stream.
        tags (dict): Desired tags as {key: value}.

    Kwargs:
        check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
            default=False

    Basic Usage:
        >>> client = boto3.client('kinesis')
        >>> stream_name = 'test-stream'
        >>> tags = {'env': 'development'}
        >>> update_tags(client, stream_name, tags)
        [True, '']

    Return:
        Tuple (bool, bool, str) — (success, changed, message)
    """
    success = False
    changed = False
    err_msg = ''
    tag_success, tag_msg, current_tags = (
        get_tags(client, stream_name, check_mode=check_mode)
    )
    if current_tags:
        # Normalize both sides to sets of (key, value) tuples so plain set
        # difference finds stale and missing/changed tags. The reduce over a
        # single-element list simply unwraps the dict items view.
        tags = make_tags_in_aws_format(tags)
        current_tags_set = (
            set(
                reduce(
                    lambda x, y: x + y,
                    [make_tags_in_proper_format(current_tags).items()]
                )
            )
        )

        new_tags_set = (
            set(
                reduce(
                    lambda x, y: x + y,
                    [make_tags_in_proper_format(tags).items()]
                )
            )
        )
        # Present-but-undesired pairs are removed; desired-but-absent pairs
        # (including changed values) are (re)created below.
        tags_to_delete = list(current_tags_set.difference(new_tags_set))
        tags_to_update = list(new_tags_set.difference(current_tags_set))
        if tags_to_delete:
            tags_to_delete = make_tags_in_proper_format(
                recreate_tags_from_list(tags_to_delete)
            )
            delete_success, delete_msg = (
                tags_action(
                    client, stream_name, tags_to_delete, action='delete',
                    check_mode=check_mode
                )
            )
            if not delete_success:
                return delete_success, changed, delete_msg
        if tags_to_update:
            tags = make_tags_in_proper_format(
                recreate_tags_from_list(tags_to_update)
            )
        else:
            # Nothing left to add after deletions — report no-op.
            return True, changed, 'Tags do not need to be updated'

    if tags:
        create_success, create_msg = (
            tags_action(
                client, stream_name, tags, action='create',
                check_mode=check_mode
            )
        )
        if create_success:
            changed = True
        return create_success, changed, create_msg

    return success, changed, err_msg
+
+
def stream_action(client, stream_name, shard_count=1, action='create',
                  timeout=300, check_mode=False):
    """Create or delete an Amazon Kinesis Stream.

    Args:
        client (botocore.client.EC2): Boto3 client.
        stream_name (str): The name of the kinesis stream.

    Kwargs:
        shard_count (int): Number of shards, used only for 'create'.
            default=1
        action (str): The action to perform.
            valid actions == create and delete
            default=create
        check_mode (bool): Dry run — report success without calling the API.
            default=False

    Basic Usage:
        >>> client = boto3.client('kinesis')
        >>> stream_action(client, 'test-stream', 20, action='create')

    Returns:
        Tuple (bool, str)
    """
    success = False
    err_msg = ''
    params = {
        'StreamName': stream_name
    }
    try:
        if check_mode:
            if action in ('create', 'delete'):
                success = True
            else:
                err_msg = 'Invalid action {0}'.format(action)
        elif action == 'create':
            params['ShardCount'] = shard_count
            client.create_stream(**params)
            success = True
        elif action == 'delete':
            client.delete_stream(**params)
            success = True
        else:
            err_msg = 'Invalid action {0}'.format(action)

    except botocore.exceptions.ClientError as e:
        err_msg = str(e)

    return success, err_msg
+
+
def retention_action(client, stream_name, retention_period=24,
                     action='increase', check_mode=False):
    """Increase or decrease the message retention period of a stream.

    Args:
        client (botocore.client.EC2): Boto3 client.
        stream_name (str): The name of the kinesis stream.

    Kwargs:
        retention_period (int): Hours to retain messages; the service
            minimum is 24 hours. default=24
        action (str): The action to perform.
            valid actions == increase and decrease
            default=increase
        check_mode (bool): Dry run — report success without calling the API.
            default=False

    Basic Usage:
        >>> client = boto3.client('kinesis')
        >>> retention_action(client, 'test-stream', 48, action='increase')

    Returns:
        Tuple (bool, str)
    """
    success = False
    err_msg = ''
    params = {
        'StreamName': stream_name
    }
    try:
        if check_mode:
            if action in ('increase', 'decrease'):
                success = True
            else:
                err_msg = 'Invalid action {0}'.format(action)
        elif action == 'increase':
            params['RetentionPeriodHours'] = retention_period
            client.increase_stream_retention_period(**params)
            success = True
            err_msg = (
                'Retention Period increased successfully to {0}'
                .format(retention_period)
            )
        elif action == 'decrease':
            params['RetentionPeriodHours'] = retention_period
            client.decrease_stream_retention_period(**params)
            success = True
            err_msg = (
                'Retention Period decreased successfully to {0}'
                .format(retention_period)
            )
        else:
            err_msg = 'Invalid action {0}'.format(action)

    except botocore.exceptions.ClientError as e:
        err_msg = str(e)

    return success, err_msg
+
+
def update(client, current_stream, stream_name, retention_period=None,
           tags=None, wait=False, wait_timeout=300, check_mode=False):
    """Update the retention period and/or tags of an existing stream.

    Args:
        client (botocore.client.EC2): Boto3 client.
        current_stream (dict): StreamDescription as returned by find_stream;
            'StreamStatus' and 'RetentionPeriodHours' are read from it.
        stream_name (str): The name of the kinesis stream.

    Kwargs:
        retention_period (int): Desired retention in hours; must not be
            less than 24 hours. None means leave unchanged.
        tags (dict): The tags you want applied; None means leave unchanged.
        wait (bool): Wait until Stream is ACTIVE before and after changes.
            default=False
        wait_timeout (int): How long to wait until this operation is considered failed.
            default=300
        check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
            default=False

    Returns:
        Tuple (bool, bool, str) — (success, changed, message)
    """
    success = True
    changed = False
    err_msg = ''
    if retention_period:
        if wait:
            # Retention can only be changed on an ACTIVE stream, so wait
            # for it first when asked to.
            wait_success, wait_msg, current_stream = (
                wait_for_status(
                    client, stream_name, 'ACTIVE', wait_timeout,
                    check_mode=check_mode
                )
            )
            if not wait_success:
                return wait_success, False, wait_msg

        if current_stream['StreamStatus'] == 'ACTIVE':
            retention_changed = False
            # The API distinguishes increase vs decrease calls.
            if retention_period > current_stream['RetentionPeriodHours']:
                retention_changed, retention_msg = (
                    retention_action(
                        client, stream_name, retention_period, action='increase',
                        check_mode=check_mode
                    )
                )

            elif retention_period < current_stream['RetentionPeriodHours']:
                retention_changed, retention_msg = (
                    retention_action(
                        client, stream_name, retention_period, action='decrease',
                        check_mode=check_mode
                    )
                )

            elif retention_period == current_stream['RetentionPeriodHours']:
                retention_msg = (
                    'Retention {0} is the same as {1}'
                    .format(
                        retention_period,
                        current_stream['RetentionPeriodHours']
                    )
                )
                success = True

            if retention_changed:
                success = True
                changed = True

            err_msg = retention_msg
            if changed and wait:
                # Wait for the retention update to settle back to ACTIVE.
                wait_success, wait_msg, current_stream = (
                    wait_for_status(
                        client, stream_name, 'ACTIVE', wait_timeout,
                        check_mode=check_mode
                    )
                )
                if not wait_success:
                    return wait_success, False, wait_msg
            elif changed and not wait:
                # Not waiting: just report if the update is still in flight.
                stream_found, stream_msg, current_stream = (
                    find_stream(client, stream_name, check_mode=check_mode)
                )
                if stream_found:
                    if current_stream['StreamStatus'] != 'ACTIVE':
                        err_msg = (
                            'Retention Period for {0} is in the process of updating'
                            .format(stream_name)
                        )
                        return success, changed, err_msg
        else:
            err_msg = (
                'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}'
                .format(current_stream['StreamStatus'])
            )
            return success, changed, err_msg

    if tags:
        # NOTE(review): update_tags' success/changed results are discarded
        # here — only its message is kept; confirm this is intentional.
        _, _, err_msg = (
            update_tags(client, stream_name, tags, check_mode=check_mode)
        )
        if wait:
            success, err_msg, _ = (
                wait_for_status(
                    client, stream_name, 'ACTIVE', wait_timeout,
                    check_mode=check_mode
                )
            )
    if success and changed:
        err_msg = 'Kinesis Stream {0} updated successfully.'.format(stream_name)
    elif success and not changed:
        err_msg = 'Kinesis Stream {0} did not changed.'.format(stream_name)

    return success, changed, err_msg
+
+
def create_stream(client, stream_name, number_of_shards=1, retention_period=None,
                  tags=None, wait=False, wait_timeout=300, check_mode=False):
    """Create a Kinesis Stream, or update it if it already exists.

    Args:
        client (botocore.client.EC2): Boto3 client.
        stream_name (str): The name of the kinesis stream.

    Kwargs:
        number_of_shards (int): Number of shards this stream will use; an
            existing stream's shard count can not be changed.
            default=1
        retention_period (int): Can not be less than 24 hours
            default=None
        tags (dict): The tags you want applied.
            default=None
        wait (bool): Wait until Stream is ACTIVE.
            default=False
        wait_timeout (int): How long to wait until this operation is considered failed.
            default=300
        check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
            default=False

    Basic Usage:
        >>> client = boto3.client('kinesis')
        >>> create_stream(client, 'test-stream', 10, tags={'env': 'test'})

    Returns:
        Tuple (bool, bool, str, dict) — (success, changed, message, facts)
    """
    success = False
    changed = False
    err_msg = ''
    results = dict()

    stream_found, stream_msg, current_stream = (
        find_stream(client, stream_name, check_mode=check_mode)
    )
    # Shard count is immutable; fail early rather than silently ignoring it.
    if stream_found and not check_mode:
        if current_stream['ShardsCount'] != number_of_shards:
            err_msg = 'Can not change the number of shards in a Kinesis Stream'
            return success, changed, err_msg, results

    if stream_found and current_stream['StreamStatus'] == 'DELETING' and wait:
        # Stream is mid-delete: wait for it to finish before recreating.
        wait_success, wait_msg, current_stream = (
            wait_for_status(
                client, stream_name, 'ACTIVE', wait_timeout,
                check_mode=check_mode
            )
        )
    if stream_found and current_stream['StreamStatus'] != 'DELETING':
        # Stream already exists: delegate to update (retention + tags).
        success, changed, err_msg = update(
            client, current_stream, stream_name, retention_period, tags,
            wait, wait_timeout, check_mode=check_mode
        )
    else:
        create_success, create_msg = (
            stream_action(
                client, stream_name, number_of_shards, action='create',
                check_mode=check_mode
            )
        )
        if create_success:
            changed = True
            if wait:
                wait_success, wait_msg, results = (
                    wait_for_status(
                        client, stream_name, 'ACTIVE', wait_timeout,
                        check_mode=check_mode
                    )
                )
                err_msg = (
                    'Kinesis Stream {0} is in the process of being created'
                    .format(stream_name)
                )
                if not wait_success:
                    return wait_success, True, wait_msg, results
            else:
                err_msg = (
                    'Kinesis Stream {0} created successfully'
                    .format(stream_name)
                )

            if tags:
                changed, err_msg = (
                    tags_action(
                        client, stream_name, tags, action='create',
                        check_mode=check_mode
                    )
                )
                if changed:
                    success = True
                if not success:
                    return success, changed, err_msg, results

            stream_found, stream_msg, current_stream = (
                find_stream(client, stream_name, check_mode=check_mode)
            )
            # Retention can only be set once the stream is ACTIVE.
            if retention_period and current_stream['StreamStatus'] == 'ACTIVE':
                changed, err_msg = (
                    retention_action(
                        client, stream_name, retention_period, action='increase',
                        check_mode=check_mode
                    )
                )
                if changed:
                    success = True
                if not success:
                    return success, changed, err_msg, results
            else:
                # NOTE(review): this branch also fires when no retention_period
                # was requested, overwriting err_msg — confirm intended.
                err_msg = (
                    'StreamStatus has to be ACTIVE in order to modify the retention period. Current status is {0}'
                    .format(current_stream['StreamStatus'])
                )
            success = create_success
            changed = True

    if success:
        # Re-describe the stream and attach its tags to build the returned
        # facts, with keys converted to snake_case.
        _, _, results = (
            find_stream(client, stream_name, check_mode=check_mode)
        )
        _, _, current_tags = (
            get_tags(client, stream_name, check_mode=check_mode)
        )
        if current_tags and not check_mode:
            current_tags = make_tags_in_proper_format(current_tags)
            results['Tags'] = current_tags
        elif check_mode and tags:
            results['Tags'] = tags
        else:
            results['Tags'] = dict()
        results = convert_to_lower(results)

    return success, changed, err_msg, results
+
+
def delete_stream(client, stream_name, wait=False, wait_timeout=300,
                  check_mode=False):
    """Delete an Amazon Kinesis Stream.

    Args:
        client (botocore.client.EC2): Boto3 client.
        stream_name (str): The name of the kinesis stream.

    Kwargs:
        wait (bool): Wait until the stream is fully deleted.
            default=False
        wait_timeout (int): How long to wait until this operation is considered failed.
            default=300
        check_mode (bool): This will pass DryRun as one of the parameters to the aws api.
            default=False

    Basic Usage:
        >>> client = boto3.client('kinesis')
        >>> delete_stream(client, 'test-stream')

    Returns:
        Tuple (bool, bool, str, dict) — (success, changed, message, facts)
    """
    success = False
    changed = False
    err_msg = ''
    results = dict()
    stream_found, stream_msg, current_stream = (
        find_stream(client, stream_name, check_mode=check_mode)
    )
    if stream_found:
        success, err_msg = (
            stream_action(
                client, stream_name, action='delete', check_mode=check_mode
            )
        )
        if success:
            changed = True
            if wait:
                success, err_msg, results = (
                    wait_for_status(
                        client, stream_name, 'DELETING', wait_timeout,
                        check_mode=check_mode
                    )
                )
                if not success:
                    # Propagate the wait failure message. Previously err_msg
                    # was overwritten with 'deleted successfully' before this
                    # check, so a timeout reported success text on failure.
                    return success, True, err_msg, results
                err_msg = 'Stream {0} deleted successfully'.format(stream_name)
            else:
                err_msg = (
                    'Stream {0} is in the process of being deleted'
                    .format(stream_name)
                )
    else:
        # Already gone: deleting a non-existent stream is a successful no-op.
        success = True
        changed = False
        err_msg = 'Stream {0} does not exist'.format(stream_name)

    return success, changed, err_msg, results
+
+
def main():
    """Ansible entry point: create, update, or delete a Kinesis Stream."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(default=None, required=True),
            shards=dict(default=None, required=False, type='int'),
            retention_period=dict(default=None, required=False, type='int'),
            tags=dict(default=None, required=False, type='dict', aliases=['resource_tags']),
            wait=dict(default=True, required=False, type='bool'),
            wait_timeout=dict(default=300, required=False, type='int'),
            state=dict(default='present', choices=['present', 'absent']),
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    retention_period = module.params.get('retention_period')
    stream_name = module.params.get('name')
    shards = module.params.get('shards')
    state = module.params.get('state')
    tags = module.params.get('tags')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    # Shard count is mandatory for creation (CreateStream requires it).
    if state == 'present' and not shards:
        module.fail_json(msg='Shards is required when state == present.')

    # The Kinesis service minimum retention is 24 hours.
    if retention_period:
        if retention_period < 24:
            module.fail_json(msg='Retention period can not be less than 24 hours.')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    check_mode = module.check_mode
    try:
        region, ec2_url, aws_connect_kwargs = (
            get_aws_connection_info(module, boto3=True)
        )
        client = (
            boto3_conn(
                module, conn_type='client', resource='kinesis',
                region=region, endpoint=ec2_url, **aws_connect_kwargs
            )
        )
    except botocore.exceptions.ClientError as e:
        err_msg = 'Boto3 Client Error - {0}'.format(str(e.msg))
        module.fail_json(
            success=False, changed=False, result={}, msg=err_msg
        )

    if state == 'present':
        success, changed, err_msg, results = (
            create_stream(
                client, stream_name, shards, retention_period, tags,
                wait, wait_timeout, check_mode
            )
        )
    elif state == 'absent':
        success, changed, err_msg, results = (
            delete_stream(client, stream_name, wait, wait_timeout, check_mode)
        )

    if success:
        # Stream facts are spread into the result as top-level keys.
        module.exit_json(
            success=success, changed=changed, msg=err_msg, **results
        )
    else:
        module.fail_json(
            success=success, changed=changed, msg=err_msg, result=results
        )
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/lambda.py b/lib/ansible/modules/cloud/amazon/lambda.py
new file mode 100644
index 0000000000..cef3b38e30
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/lambda.py
@@ -0,0 +1,473 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: lambda
+short_description: Manage AWS Lambda functions
+description:
+ - Allows for the management of Lambda functions.
+version_added: '2.2'
+requirements: [ boto3 ]
+options:
+ name:
+ description:
+ - The name you want to assign to the function you are uploading. Cannot be changed.
+ required: true
+ state:
+ description:
+ - Create or delete Lambda function
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ runtime:
+ description:
+ - The runtime environment for the Lambda function you are uploading. Required when creating a function. Use parameters as described in boto3 docs. Current example runtime environments are nodejs, nodejs4.3, java8 or python2.7
+ required: true
+ role:
+ description:
+ - The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS) resources. You may use the bare ARN if the role belongs to the same AWS account.
+ default: null
+ handler:
+ description:
+ - The function within your code that Lambda calls to begin execution
+ default: null
+ zip_file:
+ description:
+ - A .zip file containing your deployment package
+ required: false
+ default: null
+ aliases: [ 'src' ]
+ s3_bucket:
+ description:
+ - Amazon S3 bucket name where the .zip file containing your deployment package is stored
+ required: false
+ default: null
+ s3_key:
+ description:
+ - The Amazon S3 object (the deployment package) key name you want to upload
+ required: false
+ default: null
+ s3_object_version:
+ description:
+ - The Amazon S3 object (the deployment package) version you want to upload.
+ required: false
+ default: null
+ description:
+ description:
+ - A short, user-defined function description. Lambda does not use this value. Assign a meaningful description as you see fit.
+ required: false
+ default: null
+ timeout:
+ description:
+ - The function execution time at which Lambda should terminate the function.
+ required: false
+ default: 3
+ memory_size:
+ description:
+ - The amount of memory, in MB, your Lambda function is given
+ required: false
+ default: 128
+ vpc_subnet_ids:
+ description:
+ - List of subnet IDs to run Lambda function in. Use this option if you need to access resources in your VPC. Leave empty if you don't want to run the function in a VPC.
+ required: false
+ default: None
+ vpc_security_group_ids:
+ description:
+ - List of VPC security group IDs to associate with the Lambda function. Required when vpc_subnet_ids is used.
+ required: false
+ default: None
+notes:
+ - 'Currently this module only supports uploaded code via S3'
+author:
+ - 'Steyn Huizinga (@steynovich)'
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Create Lambda functions
+tasks:
+- name: looped creation
+ lambda:
+ name: '{{ item.name }}'
+ state: present
+ zip_file: '{{ item.zip_file }}'
+ runtime: 'python2.7'
+ role: 'arn:aws:iam::987654321012:role/lambda_basic_execution'
+ handler: 'hello_python.my_handler'
+ vpc_subnet_ids:
+ - subnet-123abcde
+ - subnet-edcba321
+ vpc_security_group_ids:
+ - sg-123abcde
+ - sg-edcba321
+ with_items:
+ - name: HelloWorld
+ zip_file: hello-code.zip
+ - name: ByeBye
+ zip_file: bye-code.zip
+
+# Basic Lambda function deletion
+tasks:
+- name: Delete Lambda functions HelloWorld and ByeBye
+ lambda:
+ name: '{{ item }}'
+ state: absent
+ with_items:
+ - HelloWorld
+ - ByeBye
+'''
+
+RETURN = '''
+output:
+ description: the data returned by create_function in boto3
+ returned: success
+ type: dict
+ sample:
+ 'code':
+ {
+ 'location': 'an S3 URL',
+ 'repository_type': 'S3',
+ }
+ 'configuration':
+ {
+ 'function_name': 'string',
+ 'function_arn': 'string',
+ 'runtime': 'nodejs',
+ 'role': 'string',
+ 'handler': 'string',
+ 'code_size': 123,
+ 'description': 'string',
+ 'timeout': 123,
+ 'memory_size': 123,
+ 'last_modified': 'string',
+ 'code_sha256': 'string',
+ 'version': 'string',
+ }
+'''
+
+# Import from Python standard library
+import base64
+import hashlib
+
+try:
+ import botocore
+ HAS_BOTOCORE = True
+except ImportError:
+ HAS_BOTOCORE = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
def get_current_function(connection, function_name, qualifier=None):
    """Fetch a Lambda function's code + configuration via get_function.

    Returns the boto3 response dict, or None if the call fails with a
    ClientError (e.g. the function does not exist).
    """
    params = {'FunctionName': function_name}
    if qualifier is not None:
        params['Qualifier'] = qualifier
    try:
        return connection.get_function(**params)
    except botocore.exceptions.ClientError:
        return None
+
+
def sha256sum(filename):
    """Return the base64-encoded SHA256 digest of *filename*.

    This matches the CodeSha256 format AWS Lambda reports for a
    deployment package, so it can be compared directly against the
    remote checksum.
    """
    digest = hashlib.sha256()
    with open(filename, 'rb') as fh:
        digest.update(fh.read())
    return base64.b64encode(digest.digest()).decode('utf-8')
+
+
def main():
    """Module entry point: create, update or delete an AWS Lambda function."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        runtime=dict(type='str', required=True),
        role=dict(type='str', default=None),
        handler=dict(type='str', default=None),
        zip_file=dict(type='str', default=None, aliases=['src']),
        s3_bucket=dict(type='str'),
        s3_key=dict(type='str'),
        s3_object_version=dict(type='str', default=None),
        description=dict(type='str', default=''),
        timeout=dict(type='int', default=3),
        memory_size=dict(type='int', default=128),
        vpc_subnet_ids=dict(type='list', default=None),
        vpc_security_group_ids=dict(type='list', default=None),
    )
    )

    # A local zip file and an S3-hosted package are alternative code sources.
    mutually_exclusive = [['zip_file', 's3_key'],
                          ['zip_file', 's3_bucket'],
                          ['zip_file', 's3_object_version']]

    required_together = [['s3_key', 's3_bucket', 's3_object_version'],
                         ['vpc_subnet_ids', 'vpc_security_group_ids']]

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           mutually_exclusive=mutually_exclusive,
                           required_together=required_together)

    name = module.params.get('name')
    state = module.params.get('state').lower()
    runtime = module.params.get('runtime')
    role = module.params.get('role')
    handler = module.params.get('handler')
    s3_bucket = module.params.get('s3_bucket')
    s3_key = module.params.get('s3_key')
    s3_object_version = module.params.get('s3_object_version')
    zip_file = module.params.get('zip_file')
    description = module.params.get('description')
    timeout = module.params.get('timeout')
    memory_size = module.params.get('memory_size')
    vpc_subnet_ids = module.params.get('vpc_subnet_ids')
    vpc_security_group_ids = module.params.get('vpc_security_group_ids')

    check_mode = module.check_mode
    changed = False

    if not HAS_BOTOCORE:
        module.fail_json(msg='Python module "botocore" is missing, please install it')

    if not HAS_BOTO3:
        module.fail_json(msg='Python module "boto3" is missing, please install it')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg='region must be specified')

    try:
        client = boto3_conn(module, conn_type='client', resource='lambda',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
        module.fail_json(msg=str(e))

    # Resolve the role to a full ARN. 'role' is optional (not needed for
    # state=absent), so only attempt this when one was supplied -- calling
    # role.startswith() on None previously raised AttributeError.
    role_arn = None
    if role:
        if role.startswith('arn:aws:iam'):
            role_arn = role
        else:
            # get account ID and assemble ARN from the bare role name
            try:
                iam_client = boto3_conn(module, conn_type='client', resource='iam',
                                        region=region, endpoint=ec2_url, **aws_connect_kwargs)
                account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
                role_arn = 'arn:aws:iam::{0}:role/{1}'.format(account_id, role)
            except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
                module.fail_json(msg=str(e))

    # Get function configuration if present, None otherwise
    current_function = get_current_function(client, name)

    # Update existing Lambda function
    if state == 'present' and current_function:

        # Get current state
        current_config = current_function['Configuration']
        current_version = None

        # Collect configuration properties that need updating; 'FunctionName'
        # is the only key present when nothing has changed.
        func_kwargs = {'FunctionName': name}

        # Update configuration if needed
        if role_arn and current_config['Role'] != role_arn:
            func_kwargs.update({'Role': role_arn})
        if handler and current_config['Handler'] != handler:
            func_kwargs.update({'Handler': handler})
        if description and current_config['Description'] != description:
            func_kwargs.update({'Description': description})
        if timeout and current_config['Timeout'] != timeout:
            func_kwargs.update({'Timeout': timeout})
        if memory_size and current_config['MemorySize'] != memory_size:
            func_kwargs.update({'MemorySize': memory_size})

        # Check for unsupported mutation
        if current_config['Runtime'] != runtime:
            module.fail_json(msg='Cannot change runtime. Please recreate the function')

        # If VPC configuration is desired
        if vpc_subnet_ids or vpc_security_group_ids:
            if len(vpc_subnet_ids) < 1:
                module.fail_json(msg='At least 1 subnet is required')

            if len(vpc_security_group_ids) < 1:
                module.fail_json(msg='At least 1 security group is required')

            if 'VpcConfig' in current_config:
                # Compare VPC config with current config
                current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds']
                current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds']

                subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids)
                vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids)

            if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed:
                func_kwargs.update({'VpcConfig':
                                    {'SubnetIds': vpc_subnet_ids, 'SecurityGroupIds': vpc_security_group_ids}})
        else:
            # No VPC configuration is desired, assure VPC config is empty when present in current config
            if ('VpcConfig' in current_config and
                    'VpcId' in current_config['VpcConfig'] and
                    current_config['VpcConfig']['VpcId'] != ''):
                func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}})

        # Upload new configuration if configuration has changed.
        # The base dict holds one key (FunctionName), so any addition means
        # a change -- the previous '> 2' test silently dropped single-property
        # updates.
        if len(func_kwargs) > 1:
            try:
                if not check_mode:
                    response = client.update_function_configuration(**func_kwargs)
                    current_version = response['Version']
                changed = True
            except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e:
                module.fail_json(msg=str(e))

        # Update code configuration; base dict holds two keys here.
        code_kwargs = {'FunctionName': name, 'Publish': True}

        # Update S3 location
        if s3_bucket and s3_key:
            # If function is stored on S3 always update
            code_kwargs.update({'S3Bucket': s3_bucket, 'S3Key': s3_key})

            # If S3 Object Version is given
            if s3_object_version:
                code_kwargs.update({'S3ObjectVersion': s3_object_version})

        # Compare local checksum, update remote code when different
        elif zip_file:
            local_checksum = sha256sum(zip_file)
            remote_checksum = current_config['CodeSha256']

            # Only upload new code when local code is different compared to the remote code
            if local_checksum != remote_checksum:
                try:
                    with open(zip_file, 'rb') as f:
                        encoded_zip = f.read()
                    code_kwargs.update({'ZipFile': encoded_zip})
                except IOError as e:
                    module.fail_json(msg=str(e))

        # Upload new code if needed (e.g. code checksum has changed)
        if len(code_kwargs) > 2:
            try:
                if not check_mode:
                    response = client.update_function_code(**code_kwargs)
                    current_version = response['Version']
                changed = True
            except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e:
                module.fail_json(msg=str(e))

        # Describe function code and configuration
        response = get_current_function(client, name, qualifier=current_version)
        if not response:
            module.fail_json(msg='Unable to get function information after updating')

        # We're done
        module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))

    # Function doesn't exist, create new Lambda function
    elif state == 'present':
        if role_arn is None:
            # create_function requires a role; fail cleanly instead of
            # letting the API call blow up.
            module.fail_json(msg='role is required when creating a new Lambda function')

        if s3_bucket and s3_key:
            # If function is stored on S3
            code = {'S3Bucket': s3_bucket,
                    'S3Key': s3_key}
            if s3_object_version:
                code.update({'S3ObjectVersion': s3_object_version})
        elif zip_file:
            # If function is stored in local zipfile
            try:
                with open(zip_file, 'rb') as f:
                    zip_content = f.read()

                code = {'ZipFile': zip_content}
            except IOError as e:
                module.fail_json(msg=str(e))

        else:
            module.fail_json(msg='Either S3 object or path to zipfile required')

        func_kwargs = {'FunctionName': name,
                       'Description': description,
                       'Publish': True,
                       'Runtime': runtime,
                       'Role': role_arn,
                       'Handler': handler,
                       'Code': code,
                       'Timeout': timeout,
                       'MemorySize': memory_size,
                       }

        # If VPC configuration is given
        if vpc_subnet_ids or vpc_security_group_ids:
            if len(vpc_subnet_ids) < 1:
                module.fail_json(msg='At least 1 subnet is required')

            if len(vpc_security_group_ids) < 1:
                module.fail_json(msg='At least 1 security group is required')

            func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids,
                                              'SecurityGroupIds': vpc_security_group_ids}})

        # Finally try to create function
        current_version = None
        try:
            if not check_mode:
                response = client.create_function(**func_kwargs)
                current_version = response['Version']
            changed = True
        except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e:
            module.fail_json(msg=str(e))

        response = get_current_function(client, name, qualifier=current_version)
        if not response:
            module.fail_json(msg='Unable to get function information after creating')
        module.exit_json(changed=changed, **camel_dict_to_snake_dict(response))

    # Delete existing Lambda function
    if state == 'absent' and current_function:
        try:
            if not check_mode:
                client.delete_function(FunctionName=name)
            changed = True
        except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e:
            module.fail_json(msg=str(e))

        module.exit_json(changed=changed)

    # Function already absent, do nothing
    elif state == 'absent':
        module.exit_json(changed=changed)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/lambda_alias.py b/lib/ansible/modules/cloud/amazon/lambda_alias.py
new file mode 100644
index 0000000000..a06880e410
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/lambda_alias.py
@@ -0,0 +1,389 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: lambda_alias
+short_description: Creates, updates or deletes AWS Lambda function aliases.
+description:
+ - This module allows the management of AWS Lambda functions aliases via the Ansible
+ framework. It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda function
+ itself and M(lambda_event) to manage event source mappings.
+
+version_added: "2.2"
+
+author: Pierre Jodouin (@pjodouin), Ryan Scott Brown (@ryansb)
+options:
+ function_name:
+ description:
+      - The name of the lambda function.
+ required: true
+ state:
+ description:
+ - Describes the desired state.
+ required: true
+ default: "present"
+ choices: ["present", "absent"]
+ name:
+ description:
+ - Name of the function alias.
+ required: true
+ aliases: ['alias_name']
+ description:
+ description:
+ - A short, user-defined function alias description.
+ required: false
+ version:
+ description:
+ - Version associated with the Lambda function alias.
+ A value of 0 (or omitted parameter) sets the alias to the $LATEST version.
+ required: false
+ aliases: ['function_version']
+requirements:
+ - boto3
+extends_documentation_fragment:
+ - aws
+
+'''
+
+EXAMPLES = '''
+---
+# Simple example to create a lambda function and publish a version
+- hosts: localhost
+ gather_facts: no
+ vars:
+ state: present
+ project_folder: /path/to/deployment/package
+ deployment_package: lambda.zip
+ account: 123456789012
+ production_version: 5
+ tasks:
+ - name: AWS Lambda Function
+ lambda:
+ state: "{{ state | default('present') }}"
+ name: myLambdaFunction
+ publish: True
+ description: lambda function description
+ code_s3_bucket: package-bucket
+ code_s3_key: "lambda/{{ deployment_package }}"
+ local_path: "{{ project_folder }}/{{ deployment_package }}"
+ runtime: python2.7
+ timeout: 5
+ handler: lambda.handler
+ memory_size: 128
+ role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole"
+
+ - name: show results
+ debug:
+ var: lambda_facts
+
+# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0)
+ - name: "alias 'Dev' for function {{ lambda_facts.FunctionName }} "
+ lambda_alias:
+ state: "{{ state | default('present') }}"
+ function_name: "{{ lambda_facts.FunctionName }}"
+ name: Dev
+ description: Development is $LATEST version
+
+# The QA alias will only be created when a new version is published (i.e. not = '$LATEST')
+ - name: "alias 'QA' for function {{ lambda_facts.FunctionName }} "
+ lambda_alias:
+ state: "{{ state | default('present') }}"
+ function_name: "{{ lambda_facts.FunctionName }}"
+ name: QA
+ version: "{{ lambda_facts.Version }}"
+ description: "QA is version {{ lambda_facts.Version }}"
+ when: lambda_facts.Version != "$LATEST"
+
+# The Prod alias will have a fixed version based on a variable
+ - name: "alias 'Prod' for function {{ lambda_facts.FunctionName }} "
+ lambda_alias:
+ state: "{{ state | default('present') }}"
+ function_name: "{{ lambda_facts.FunctionName }}"
+ name: Prod
+ version: "{{ production_version }}"
+ description: "Production is version {{ production_version }}"
+'''
+
+RETURN = '''
+---
+alias_arn:
+ description: Full ARN of the function, including the alias
+ returned: success
+ type: string
+ sample: arn:aws:lambda:us-west-2:123456789012:function:myFunction:dev
+description:
+ description: A short description of the alias
+ returned: success
+ type: string
+ sample: The development stage for my hot new app
+function_version:
+ description: The qualifier that the alias refers to
+ returned: success
+ type: string
+ sample: $LATEST
+name:
+ description: The name of the alias assigned
+ returned: success
+ type: string
+ sample: dev
+'''
+
+
class AWSConnection:
    """
    Create the connection object and client objects as required.
    """

    def __init__(self, ansible_obj, resources, boto3=True):
        # NOTE(review): the 'boto3' flag parameter shadows the boto3 module
        # inside this method; kept as-is for interface compatibility.
        try:
            self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=boto3)

            self.resource_client = dict()
            # Copy before extending so the caller's list is never mutated
            # (the original appended 'iam' to the list passed in).
            resources = list(resources) if resources else ['lambda']

            if 'iam' not in resources:
                resources.append('iam')

            for resource in resources:
                aws_connect_kwargs.update(dict(region=self.region,
                                               endpoint=self.endpoint,
                                               conn_type='client',
                                               resource=resource
                                               ))
                self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)

            # if region is not provided, then get default profile/session region
            if not self.region:
                self.region = self.resource_client['lambda'].meta.region_name

        except (ClientError, ParamValidationError, MissingParametersError) as e:
            ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))

        # Best-effort lookup of the current account ID (used to assemble ARNs);
        # falls back to '' when the IAM call is not permitted.
        try:
            self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
        except (ClientError, ValueError, KeyError, IndexError):
            self.account_id = ''

    def client(self, resource='lambda'):
        """Return the boto3 client for *resource* (default: 'lambda')."""
        return self.resource_client[resource]
+
+
def pc(key):
    """
    Convert a snake_case key to its Pascal case equivalent, e.g.
    'this_function_name' -> 'ThisFunctionName'.

    :param key: snake_case parameter name
    :return: Pascal case string
    """
    return ''.join(map(str.capitalize, key.split('_')))
+
+
def set_api_params(module, module_params):
    """
    Build a boto3 API parameter dict from the named module parameters,
    mapping each snake_case name to its Pascal-case key and skipping any
    parameter that is unset or falsy.

    :param module: Ansible module reference
    :param module_params: iterable of snake_case parameter names
    :return: dict of boto3 API parameters
    """
    return dict(
        (pc(name), module.params.get(name, None))
        for name in module_params
        if module.params.get(name, None)
    )
+
+
def validate_params(module, aws):
    """
    Performs basic parameter validation.

    Validates the function name and normalises function_version
    (0 or omitted means the $LATEST pseudo-version).

    :param module: Ansible module reference
    :param aws: AWS client connection (unused; kept for call-site parity)
    :return:
    """

    function_name = module.params['function_name']

    # validate function name: word characters, hyphens and colons
    # (colon permits ARN-style qualifiers); raw string avoids an
    # invalid-escape warning for \w and \-
    if not re.search(r'^[\w\-:]+$', function_name):
        module.fail_json(
            msg='Function name {0} is invalid. Names must contain only alphanumeric characters, underscores and hyphens.'.format(function_name)
        )
    if len(function_name) > 64:
        module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))

    # if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string
    if module.params['function_version'] == 0:
        module.params['function_version'] = '$LATEST'
    else:
        module.params['function_version'] = str(module.params['function_version'])

    return
+
+
def get_lambda_alias(module, aws):
    """
    Returns the lambda function alias if it exists.

    :param module: Ansible module reference
    :param aws: AWS client connection
    :return: alias facts dict, or None when the alias does not exist
    """

    client = aws.client('lambda')

    # set API parameters
    api_params = set_api_params(module, ('function_name', 'name'))

    # check if alias exists and get facts
    try:
        results = client.get_alias(**api_params)
    except ClientError as e:
        # Only ClientError carries a .response attribute; the previous code
        # also inspected e.response for ParamValidationError and
        # MissingParametersError, which would itself raise AttributeError.
        if e.response['Error']['Code'] == 'ResourceNotFoundException':
            results = None
        else:
            module.fail_json(msg='Error retrieving function alias: {0}'.format(e))
    except (ParamValidationError, MissingParametersError) as e:
        module.fail_json(msg='Error retrieving function alias: {0}'.format(e))

    return results
+
+
def lambda_alias(module, aws):
    """
    Adds, updates or deletes lambda function aliases.

    :param module: Ansible module reference
    :param aws: AWS client connection
    :return dict: 'changed' flag plus the alias facts
    """
    client = aws.client('lambda')
    results = dict()
    changed = False
    current_state = 'absent'
    state = module.params['state']

    facts = get_lambda_alias(module, aws)
    if facts:
        current_state = 'present'

    if state == 'present':
        if current_state == 'present':

            # check if alias has changed -- only version and description can change
            alias_params = ('function_version', 'description')
            for param in alias_params:
                if module.params.get(param) != facts.get(pc(param)):
                    changed = True
                    break

            if changed:
                api_params = set_api_params(module, ('function_name', 'name'))
                api_params.update(set_api_params(module, alias_params))

                if not module.check_mode:
                    try:
                        results = client.update_alias(**api_params)
                    except (ClientError, ParamValidationError, MissingParametersError) as e:
                        module.fail_json(msg='Error updating function alias: {0}'.format(e))

        else:
            # create new function alias
            api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description'))

            try:
                if not module.check_mode:
                    results = client.create_alias(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error creating function alias: {0}'.format(e))

    else:  # state = 'absent'
        if current_state == 'present':
            # delete the function alias
            api_params = set_api_params(module, ('function_name', 'name'))

            try:
                if not module.check_mode:
                    results = client.delete_alias(**api_params)
                changed = True
            except (ClientError, ParamValidationError, MissingParametersError) as e:
                module.fail_json(msg='Error deleting function alias: {0}'.format(e))

    # Both 'results' and 'facts' can be empty/None (deleting a non-existent
    # alias, or creating in check mode); the previous 'dict(results or facts)'
    # raised TypeError on dict(None) in those cases.
    return dict(changed=changed, **dict(results or facts or {}))
+
+
def main():
    """
    Main entry point.

    :return dict: ansible facts
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            state=dict(required=False, default='present', choices=['present', 'absent']),
            # required parameters must not also declare a default --
            # 'required=True, default=None' is contradictory spec usage
            function_name=dict(required=True),
            name=dict(required=True, aliases=['alias_name']),
            function_version=dict(type='int', required=False, default=0, aliases=['version']),
            description=dict(required=False, default=None),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[],
        required_together=[]
    )

    # validate dependencies
    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required for this module.')

    aws = AWSConnection(module, ['lambda'])

    validate_params(module, aws)

    results = lambda_alias(module, aws)

    module.exit_json(**camel_dict_to_snake_dict(results))
+
+
+# ansible import module(s) kept at ~eof as recommended
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/lambda_event.py b/lib/ansible/modules/cloud/amazon/lambda_event.py
new file mode 100644
index 0000000000..acb057a8de
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/lambda_event.py
@@ -0,0 +1,427 @@
+#!/usr/bin/python
+# (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions>
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: lambda_event
+short_description: Creates, updates or deletes AWS Lambda function event mappings.
+description:
+ - This module allows the management of AWS Lambda function event source mappings such as DynamoDB and Kinesis stream
+ events via the Ansible framework. These event source mappings are relevant only in the AWS Lambda pull model, where
+ AWS Lambda invokes the function.
+ It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda
+ function itself and M(lambda_alias) to manage function aliases.
+
+version_added: "2.2"
+
+author: Pierre Jodouin (@pjodouin), Ryan Brown (@ryansb)
+options:
+ lambda_function_arn:
+ description:
+ - The name or ARN of the lambda function.
+ required: true
+ aliases: ['function_name', 'function_arn']
+ state:
+ description:
+ - Describes the desired state.
+ required: true
+ default: "present"
+ choices: ["present", "absent"]
+ alias:
+ description:
+ - Name of the function alias. Mutually exclusive with C(version).
+ required: true
+ version:
+ description:
+ - Version of the Lambda function. Mutually exclusive with C(alias).
+ required: false
+ event_source:
+ description:
+ - Source of the event that triggers the lambda function.
+ required: false
+ default: stream
+ choices: ['stream']
+ source_params:
+ description:
+ - Sub-parameters required for event source.
+ - I(== stream event source ==)
+ - C(source_arn) The Amazon Resource Name (ARN) of the Kinesis or DynamoDB stream that is the event source.
+ - C(enabled) Indicates whether AWS Lambda should begin polling the event source. Default is True.
+ - C(batch_size) The largest number of records that AWS Lambda will retrieve from your event source at the
+ time of invoking your function. Default is 100.
+ - C(starting_position) The position in the stream where AWS Lambda should start reading.
+ Choices are TRIM_HORIZON or LATEST.
+ required: true
+requirements:
+ - boto3
+extends_documentation_fragment:
+ - aws
+
+'''
+
+EXAMPLES = '''
+---
+# Example that creates a lambda event notification for a DynamoDB stream
+- hosts: localhost
+ gather_facts: no
+ vars:
+ state: present
+ tasks:
+ - name: DynamoDB stream event mapping
+ lambda_event:
+ state: "{{ state | default('present') }}"
+ event_source: stream
+ function_name: "{{ function_name }}"
+ alias: Dev
+ source_params:
+ source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457
+ enabled: True
+ batch_size: 100
+ starting_position: TRIM_HORIZON
+
+ - name: Show source event
+ debug:
+ var: lambda_stream_events
+'''
+
+RETURN = '''
+---
+lambda_stream_events:
+ description: list of dictionaries returned by the API describing stream event mappings
+ returned: success
+ type: list
+'''
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Helper Functions & classes
+#
+# ---------------------------------------------------------------------------------------------------
+
+
+class AWSConnection:
+    """
+    Create the connection object and client objects as required.
+
+    Holds one boto3 client per requested resource, plus an 'iam' client that
+    is used to look up the caller's AWS account id.
+    """
+
+    def __init__(self, ansible_obj, resources, use_boto3=True):
+
+        try:
+            self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3)
+
+            self.resource_client = dict()
+            if not resources:
+                resources = ['lambda']
+
+            # NOTE(review): append mutates the caller's list in place
+            resources.append('iam')
+
+            for resource in resources:
+                aws_connect_kwargs.update(dict(region=self.region,
+                                               endpoint=self.endpoint,
+                                               conn_type='client',
+                                               resource=resource
+                                               ))
+                self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs)
+
+            # if region is not provided, then get default profile/session region
+            if not self.region:
+                self.region = self.resource_client['lambda'].meta.region_name
+
+        except (ClientError, ParamValidationError, MissingParametersError) as e:
+            ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e))
+
+        # set account ID (best effort: falls back to empty string on any failure)
+        try:
+            self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4]
+        except (ClientError, ValueError, KeyError, IndexError):
+            self.account_id = ''
+
+    def client(self, resource='lambda'):
+        # return the boto3 client created for the given resource
+        return self.resource_client[resource]
+
+
+def pc(key):
+    """
+    Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
+
+    :param key: snake_case parameter name
+    :return: PascalCase string as expected by the boto3 API
+    """
+
+    return "".join([token.capitalize() for token in key.split('_')])
+
+
+def ordered_obj(obj):
+    """
+    Order object for comparison purposes
+
+    Recursively converts dicts to sorted (key, value) tuples and sorts lists
+    so two structurally equal objects compare equal regardless of ordering.
+
+    :param obj: arbitrary nested dict/list/scalar structure
+    :return: canonically ordered representation of obj
+    """
+
+    if isinstance(obj, dict):
+        return sorted((k, ordered_obj(v)) for k, v in obj.items())
+    if isinstance(obj, list):
+        # NOTE(review): list elements must be mutually comparable for sorted()
+        # to succeed on Python 3 -- confirm inputs are homogeneous.
+        return sorted(ordered_obj(x) for x in obj)
+    else:
+        return obj
+
+
+def set_api_sub_params(params):
+    """
+    Sets module sub-parameters to those expected by the boto3 API.
+
+    :param params: dict of snake_case sub-parameters
+    :return: dict with PascalCase keys, omitting unset values
+    """
+
+    api_params = dict()
+
+    for param in params.keys():
+        param_value = params.get(param, None)
+        # NOTE(review): truthiness test also drops legitimate falsy values
+        # (0, False, '') -- confirm no sub-parameter ever needs them.
+        if param_value:
+            api_params[pc(param)] = param_value
+
+    return api_params
+
+
+def validate_params(module, aws):
+    """
+    Performs basic parameter validation.
+
+    Checks the function name, expands it to a fully qualified ARN and appends
+    the version/alias qualifier when one is supplied.
+
+    :param module: Ansible module reference
+    :param aws: AWSConnection object (provides region and account id)
+    :return:
+    """
+
+    function_name = module.params['lambda_function_arn']
+
+    # validate function name
+    # NOTE(review): the pattern also accepts underscores and colons although
+    # the error message mentions only alphanumerics and hyphens.
+    if not re.search('^[\w\-:]+$', function_name):
+        module.fail_json(
+            msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
+        )
+    if len(function_name) > 64:
+        module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
+
+    # check if 'function_name' needs to be expanded in full ARN format
+    if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'):
+        function_name = module.params['lambda_function_arn']
+        module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name)
+
+    # append ':<version-or-alias>' qualifier when one is given
+    qualifier = get_qualifier(module)
+    if qualifier:
+        function_arn = module.params['lambda_function_arn']
+        module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier)
+
+    return
+
+
+def get_qualifier(module):
+    """
+    Returns the function qualifier as a version or alias or None.
+
+    A positive 'version' takes precedence over 'alias'; the two options are
+    declared mutually exclusive in the argument spec.
+
+    :param module: Ansible module reference
+    :return: qualifier string or None
+    """
+
+    qualifier = None
+    if module.params['version'] > 0:
+        qualifier = str(module.params['version'])
+    elif module.params['alias']:
+        qualifier = str(module.params['alias'])
+
+    return qualifier
+
+
+# ---------------------------------------------------------------------------------------------------
+#
+# Lambda Event Handlers
+#
+# This section defines a lambda_event_X function where X is an AWS service capable of initiating
+# the execution of a Lambda function (pull only).
+#
+# ---------------------------------------------------------------------------------------------------
+
+def lambda_event_stream(module, aws):
+    """
+    Adds, updates or deletes lambda stream (DynamoDb, Kinesis) event notifications.
+
+    :param module: Ansible module reference
+    :param aws: AWSConnection object providing the boto3 lambda client
+    :return: dict with 'changed' flag and snake_cased mapping facts
+    """
+
+    client = aws.client('lambda')
+    facts = dict()
+    changed = False
+    current_state = 'absent'
+    state = module.params['state']
+
+    api_params = dict(FunctionName=module.params['lambda_function_arn'])
+
+    # check if required sub-parameters are present and valid
+    source_params = module.params['source_params']
+
+    source_arn = source_params.get('source_arn')
+    if source_arn:
+        api_params.update(EventSourceArn=source_arn)
+    else:
+        module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.")
+
+    # check if optional sub-parameters are valid, if present
+    batch_size = source_params.get('batch_size')
+    if batch_size:
+        try:
+            source_params['batch_size'] = int(batch_size)
+        except ValueError:
+            module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size']))
+
+    # optional boolean value needs special treatment as not present does not imply False
+    source_param_enabled = module.boolean(source_params.get('enabled', 'True'))
+
+    # check if event mapping exist
+    try:
+        facts = client.list_event_source_mappings(**api_params)['EventSourceMappings']
+        if facts:
+            current_state = 'present'
+    except ClientError as e:
+        module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e))
+
+    if state == 'present':
+        if current_state == 'absent':
+
+            # create a brand new mapping -- starting_position is mandatory here
+            starting_position = source_params.get('starting_position')
+            if starting_position:
+                api_params.update(StartingPosition=starting_position)
+            else:
+                module.fail_json(msg="Source parameter 'starting_position' is required for stream event notification.")
+
+            if source_arn:
+                api_params.update(Enabled=source_param_enabled)
+            if source_params.get('batch_size'):
+                api_params.update(BatchSize=source_params.get('batch_size'))
+
+            try:
+                if not module.check_mode:
+                    facts = client.create_event_source_mapping(**api_params)
+                changed = True
+            except (ClientError, ParamValidationError, MissingParametersError) as e:
+                module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e))
+
+        else:
+            # current_state is 'present'
+            # mapping already exists: build a minimal update keyed on its UUID
+            api_params = dict(FunctionName=module.params['lambda_function_arn'])
+            current_mapping = facts[0]
+            api_params.update(UUID=current_mapping['UUID'])
+            mapping_changed = False
+
+            # check if anything changed
+            if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']:
+                api_params.update(BatchSize=source_params['batch_size'])
+                mapping_changed = True
+
+            if source_param_enabled is not None:
+                if source_param_enabled:
+                    # only toggle when the mapping isn't already (being) enabled
+                    if current_mapping['State'] not in ('Enabled', 'Enabling'):
+                        api_params.update(Enabled=True)
+                        mapping_changed = True
+                else:
+                    # likewise, skip the update when already (being) disabled
+                    if current_mapping['State'] not in ('Disabled', 'Disabling'):
+                        api_params.update(Enabled=False)
+                        mapping_changed = True
+
+            if mapping_changed:
+                try:
+                    if not module.check_mode:
+                        facts = client.update_event_source_mapping(**api_params)
+                    changed = True
+                except (ClientError, ParamValidationError, MissingParametersError) as e:
+                    module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e))
+
+    else:
+        if current_state == 'present':
+            # remove the stream event mapping
+            api_params = dict(UUID=facts[0]['UUID'])
+
+            try:
+                if not module.check_mode:
+                    facts = client.delete_event_source_mapping(**api_params)
+                changed = True
+            except (ClientError, ParamValidationError, MissingParametersError) as e:
+                module.fail_json(msg='Error removing stream source event mapping: {0}'.format(e))
+
+    return camel_dict_to_snake_dict(dict(changed=changed, events=facts))
+
+
+def main():
+    """
+    Main entry point: dispatches to the lambda_event_<source> handler.
+    """
+    # handlers (lambda_event_<suffix>) are looked up on this module by name
+    this_module = sys.modules[__name__]
+    source_choices = ["stream"]
+
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            state=dict(required=False, default='present', choices=['present', 'absent']),
+            lambda_function_arn=dict(required=True, default=None, aliases=['function_name', 'function_arn']),
+            event_source=dict(required=False, default="stream", choices=source_choices),
+            source_params=dict(type='dict', required=True, default=None),
+            alias=dict(required=False, default=None),
+            version=dict(type='int', required=False, default=0),
+        )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        mutually_exclusive=[['alias', 'version']],
+        required_together=[]
+    )
+
+    # validate dependencies
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 is required for this module.')
+
+    aws = AWSConnection(module, ['lambda'])
+
+    validate_params(module, aws)
+
+    # dispatch to lambda_event_stream (the only supported source today)
+    this_module_function = getattr(this_module, 'lambda_event_{}'.format(module.params['event_source'].lower()))
+
+    results = this_module_function(module, aws)
+
+    module.exit_json(**results)
+
+
+# ansible import module(s) kept at ~eof as recommended
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/lambda_facts.py b/lib/ansible/modules/cloud/amazon/lambda_facts.py
new file mode 100644
index 0000000000..ac3db66794
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/lambda_facts.py
@@ -0,0 +1,413 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import datetime
+import sys
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: lambda_facts
+short_description: Gathers AWS Lambda function details as Ansible facts
+description:
+ - Gathers various details related to Lambda functions, including aliases, versions and event source mappings.
+ Use module M(lambda) to manage the lambda function itself, M(lambda_alias) to manage function aliases and
+ M(lambda_event) to manage lambda event source mappings.
+
+version_added: "2.2"
+
+options:
+ query:
+ description:
+ - Specifies the resource type for which to gather facts. Leave blank to retrieve all facts.
+      required: false
+ choices: [ "aliases", "all", "config", "mappings", "policy", "versions" ]
+ default: "all"
+ function_name:
+ description:
+ - The name of the lambda function for which facts are requested.
+ required: false
+ default: null
+ aliases: [ "function", "name"]
+ event_source_arn:
+ description:
+ - For query type 'mappings', this is the Amazon Resource Name (ARN) of the Amazon Kinesis or DynamoDB stream.
+ default: null
+ required: false
+author: Pierre Jodouin (@pjodouin)
+requirements:
+ - boto3
+extends_documentation_fragment:
+ - aws
+
+'''
+
+EXAMPLES = '''
+---
+# Simple example of listing all info for a function
+- name: List all for a specific function
+ lambda_facts:
+ query: all
+ function_name: myFunction
+ register: my_function_details
+# List all versions of a function
+- name: List function versions
+ lambda_facts:
+ query: versions
+ function_name: myFunction
+ register: my_function_versions
+# List all lambda function versions
+- name: List all function
+ lambda_facts:
+ query: all
+ max_items: 20
+- name: show Lambda facts
+ debug:
+ var: lambda_facts
+'''
+
+RETURN = '''
+---
+lambda_facts:
+ description: lambda facts
+ returned: success
+ type: dict
+lambda_facts.function:
+ description: lambda function list
+ returned: success
+ type: dict
+lambda_facts.function.TheName:
+ description: lambda function information, including event, mapping, and version information
+ returned: success
+ type: dict
+'''
+
+
+def fix_return(node):
+    """
+    fixup returned dictionary
+
+    Recursively walks the structure, converting datetime objects to their
+    string representation so the result is JSON-serializable.
+
+    :param node: arbitrary nested structure returned by boto3
+    :return: same structure with datetimes stringified
+    """
+
+    if isinstance(node, datetime.datetime):
+        node_value = str(node)
+
+    elif isinstance(node, list):
+        node_value = [fix_return(item) for item in node]
+
+    elif isinstance(node, dict):
+        node_value = dict([(item, fix_return(node[item])) for item in node.keys()])
+
+    else:
+        # scalars pass through unchanged
+        node_value = node
+
+    return node_value
+
+
+def alias_details(client, module):
+    """
+    Returns list of aliases for a specified function.
+
+    Requires 'function_name'; honors 'max_items' and 'next_marker' paging
+    options when present.
+
+    :param client: AWS API client reference (boto3)
+    :param module: Ansible module reference
+    :return dict: {function_name: {'aliases': [...]}} in snake_case
+    """
+
+    lambda_facts = dict()
+
+    function_name = module.params.get('function_name')
+    if function_name:
+        params = dict()
+        # NOTE(review): 'max_items'/'next_marker' are read here but not declared
+        # in main()'s argument_spec -- confirm whether they can ever be set.
+        if module.params.get('max_items'):
+            params['MaxItems'] = module.params.get('max_items')
+
+        if module.params.get('next_marker'):
+            params['Marker'] = module.params.get('next_marker')
+        try:
+            lambda_facts.update(aliases=client.list_aliases(FunctionName=function_name, **params)['Aliases'])
+        except ClientError as e:
+            # a missing function simply yields an empty alias list
+            if e.response['Error']['Code'] == 'ResourceNotFoundException':
+                lambda_facts.update(aliases=[])
+            else:
+                module.fail_json(msg='Unable to get {0} aliases, error: {1}'.format(function_name, e))
+    else:
+        module.fail_json(msg='Parameter function_name required for query=aliases.')
+
+    return {function_name: camel_dict_to_snake_dict(lambda_facts)}
+
+
+def all_details(client, module):
+    """
+    Returns all lambda related facts.
+
+    With 'function_name' set, aggregates config/alias/policy/version/mapping
+    details for that one function; otherwise returns config details for all
+    functions.
+
+    :param client: AWS API client reference (boto3)
+    :param module: Ansible module reference
+    :return dict:
+    """
+
+    if module.params.get('max_items') or module.params.get('next_marker'):
+        module.fail_json(msg='Cannot specify max_items nor next_marker for query=all.')
+
+    lambda_facts = dict()
+
+    function_name = module.params.get('function_name')
+    if function_name:
+        # each helper returns {function_name: {...}}; merge their inner dicts
+        lambda_facts[function_name] = {}
+        lambda_facts[function_name].update(config_details(client, module)[function_name])
+        lambda_facts[function_name].update(alias_details(client, module)[function_name])
+        lambda_facts[function_name].update(policy_details(client, module)[function_name])
+        lambda_facts[function_name].update(version_details(client, module)[function_name])
+        lambda_facts[function_name].update(mapping_details(client, module)[function_name])
+    else:
+        lambda_facts.update(config_details(client, module))
+
+    return lambda_facts
+
+
+def config_details(client, module):
+    """
+    Returns configuration details for one or all lambda functions.
+
+    :param client: AWS API client reference (boto3)
+    :param module: Ansible module reference
+    :return dict: {function_name: config} for a single function, or a dict
+        keyed by each function's name when listing all functions
+    """
+
+    lambda_facts = dict()
+
+    function_name = module.params.get('function_name')
+    if function_name:
+        try:
+            lambda_facts.update(client.get_function_configuration(FunctionName=function_name))
+        except ClientError as e:
+            # a missing function yields an empty config rather than a failure
+            if e.response['Error']['Code'] == 'ResourceNotFoundException':
+                lambda_facts.update(function={})
+            else:
+                module.fail_json(msg='Unable to get {0} configuration, error: {1}'.format(function_name, e))
+    else:
+        params = dict()
+        if module.params.get('max_items'):
+            params['MaxItems'] = module.params.get('max_items')
+
+        if module.params.get('next_marker'):
+            params['Marker'] = module.params.get('next_marker')
+
+        try:
+            lambda_facts.update(function_list=client.list_functions(**params)['Functions'])
+        except ClientError as e:
+            if e.response['Error']['Code'] == 'ResourceNotFoundException':
+                lambda_facts.update(function_list=[])
+            else:
+                module.fail_json(msg='Unable to get function list, error: {0}'.format(e))
+
+        # list-all path: return early with a dict keyed by function name
+        functions = dict()
+        for func in lambda_facts.pop('function_list', []):
+            functions[func['FunctionName']] = camel_dict_to_snake_dict(func)
+        return functions
+
+    return {function_name: camel_dict_to_snake_dict(lambda_facts)}
+
+
+def mapping_details(client, module):
+    """
+    Returns all lambda event source mappings.
+
+    Optionally filtered by 'function_name' and/or 'event_source_arn'.
+
+    :param client: AWS API client reference (boto3)
+    :param module: Ansible module reference
+    :return dict: keyed by function name when one was given, flat otherwise
+    """
+
+    lambda_facts = dict()
+    params = dict()
+    function_name = module.params.get('function_name')
+
+    if function_name:
+        params['FunctionName'] = module.params.get('function_name')
+
+    if module.params.get('event_source_arn'):
+        params['EventSourceArn'] = module.params.get('event_source_arn')
+
+    if module.params.get('max_items'):
+        params['MaxItems'] = module.params.get('max_items')
+
+    if module.params.get('next_marker'):
+        params['Marker'] = module.params.get('next_marker')
+
+    try:
+        lambda_facts.update(mappings=client.list_event_source_mappings(**params)['EventSourceMappings'])
+    except ClientError as e:
+        # no mappings found is not an error for fact gathering
+        if e.response['Error']['Code'] == 'ResourceNotFoundException':
+            lambda_facts.update(mappings=[])
+        else:
+            module.fail_json(msg='Unable to get source event mappings, error: {0}'.format(e))
+
+    if function_name:
+        return {function_name: camel_dict_to_snake_dict(lambda_facts)}
+
+    return camel_dict_to_snake_dict(lambda_facts)
+
+
+def policy_details(client, module):
+    """
+    Returns policy attached to a lambda function.
+
+    Requires 'function_name'; paging options are rejected for this query.
+
+    :param client: AWS API client reference (boto3)
+    :param module: Ansible module reference
+    :return dict: {function_name: {'policy': {...}}} in snake_case
+    """
+
+    if module.params.get('max_items') or module.params.get('next_marker'):
+        module.fail_json(msg='Cannot specify max_items nor next_marker for query=policy.')
+
+    lambda_facts = dict()
+
+    function_name = module.params.get('function_name')
+    if function_name:
+        try:
+            # get_policy returns a JSON string so must convert to dict before reassigning to its key
+            lambda_facts.update(policy=json.loads(client.get_policy(FunctionName=function_name)['Policy']))
+        except ClientError as e:
+            # a function without a policy simply yields an empty dict
+            if e.response['Error']['Code'] == 'ResourceNotFoundException':
+                lambda_facts.update(policy={})
+            else:
+                module.fail_json(msg='Unable to get {0} policy, error: {1}'.format(function_name, e))
+    else:
+        module.fail_json(msg='Parameter function_name required for query=policy.')
+
+    return {function_name: camel_dict_to_snake_dict(lambda_facts)}
+
+
+def version_details(client, module):
+    """
+    Returns all lambda function versions.
+
+    Requires 'function_name'; honors 'max_items' and 'next_marker' paging
+    options when present.
+
+    :param client: AWS API client reference (boto3)
+    :param module: Ansible module reference
+    :return dict: {function_name: {'versions': [...]}} in snake_case
+    """
+
+    lambda_facts = dict()
+
+    function_name = module.params.get('function_name')
+    if function_name:
+        params = dict()
+        if module.params.get('max_items'):
+            params['MaxItems'] = module.params.get('max_items')
+
+        if module.params.get('next_marker'):
+            params['Marker'] = module.params.get('next_marker')
+
+        try:
+            lambda_facts.update(versions=client.list_versions_by_function(FunctionName=function_name, **params)['Versions'])
+        except ClientError as e:
+            # a missing function simply yields an empty version list
+            if e.response['Error']['Code'] == 'ResourceNotFoundException':
+                lambda_facts.update(versions=[])
+            else:
+                module.fail_json(msg='Unable to get {0} versions, error: {1}'.format(function_name, e))
+    else:
+        module.fail_json(msg='Parameter function_name required for query=versions.')
+
+    return {function_name: camel_dict_to_snake_dict(lambda_facts)}
+
+
+def main():
+    """
+    Main entry point.
+
+    Gathers the requested lambda facts and returns them under
+    ansible_facts.lambda_facts.function.
+
+    :return dict: ansible facts
+    """
+    argument_spec = ec2_argument_spec()
+    argument_spec.update(
+        dict(
+            function_name=dict(required=False, default=None, aliases=['function', 'name']),
+            query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'], default='all'),
+            event_source_arn=dict(required=False, default=None)
+        )
+    )
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        mutually_exclusive=[],
+        required_together=[]
+    )
+
+    # validate dependencies
+    if not HAS_BOTO3:
+        module.fail_json(msg='boto3 is required for this module.')
+
+    # validate function_name if present
+    # NOTE(review): the pattern also accepts underscores and colons although
+    # the error message mentions only alphanumerics and hyphens.
+    function_name = module.params['function_name']
+    if function_name:
+        if not re.search("^[\w\-:]+$", function_name):
+            module.fail_json(
+                msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
+            )
+        if len(function_name) > 64:
+            module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
+
+    try:
+        region, endpoint, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
+        aws_connect_kwargs.update(dict(region=region,
+                                       endpoint=endpoint,
+                                       conn_type='client',
+                                       resource='lambda'
+                                       ))
+        client = boto3_conn(module, **aws_connect_kwargs)
+    except ClientError as e:
+        module.fail_json(msg="Can't authorize connection - {0}".format(e))
+
+    # map the 'query' option to its handler function defined in this module
+    this_module = sys.modules[__name__]
+
+    invocations = dict(
+        aliases='alias_details',
+        all='all_details',
+        config='config_details',
+        mappings='mapping_details',
+        policy='policy_details',
+        versions='version_details',
+    )
+
+    this_module_function = getattr(this_module, invocations[module.params['query']])
+    # fix_return() stringifies datetimes so results are JSON-serializable
+    all_facts = fix_return(this_module_function(client, module))
+
+    results = dict(ansible_facts={'lambda_facts': {'function': all_facts}}, changed=False)
+
+    if module.check_mode:
+        results['msg'] = 'Check mode set but ignored for fact gathering only.'
+
+    module.exit_json(**results)
+
+
+# ansible import module(s) kept at ~eof as recommended
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/redshift.py b/lib/ansible/modules/cloud/amazon/redshift.py
new file mode 100644
index 0000000000..a1ae146a42
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/redshift.py
@@ -0,0 +1,479 @@
+#!/usr/bin/python
+
+# Copyright 2014 Jens Carl, Hothead Games Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+author:
+ - "Jens Carl (@j-carl), Hothead Games Inc."
+module: redshift
+version_added: "2.2"
+short_description: create, delete, or modify an Amazon Redshift instance
+description:
+ - Creates, deletes, or modifies amazon Redshift cluster instances.
+options:
+ command:
+ description:
+ - Specifies the action to take.
+ required: true
+ choices: [ 'create', 'facts', 'delete', 'modify' ]
+ identifier:
+ description:
+ - Redshift cluster identifier.
+ required: true
+ node_type:
+ description:
+ - The node type of the cluster. Must be specified when command=create.
+ choices: ['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge']
+ username:
+ description:
+ - Master database username. Used only when command=create.
+ password:
+ description:
+ - Master database password. Used only when command=create.
+ cluster_type:
+ description:
+ - The type of cluster.
+ choices: ['multi-node', 'single-node' ]
+ default: 'single-node'
+ db_name:
+ description:
+ - Name of the database.
+ default: null
+ availability_zone:
+ description:
+ - availability zone in which to launch cluster
+ aliases: ['zone', 'aws_zone']
+ number_of_nodes:
+ description:
+ - Number of nodes. Only used when cluster_type=multi-node.
+ default: null
+ cluster_subnet_group_name:
+ description:
+ - which subnet to place the cluster
+ aliases: ['subnet']
+ cluster_security_groups:
+ description:
+ - in which security group the cluster belongs
+ default: null
+ aliases: ['security_groups']
+ vpc_security_group_ids:
+ description:
+ - VPC security group
+ aliases: ['vpc_security_groups']
+ default: null
+ preferred_maintenance_window:
+ description:
+ - maintenance window
+ aliases: ['maintance_window', 'maint_window']
+ default: null
+ cluster_parameter_group_name:
+ description:
+ - name of the cluster parameter group
+ aliases: ['param_group_name']
+ default: null
+ automated_snapshot_retention_period:
+ description:
+ - period when the snapshot take place
+ aliases: ['retention_period']
+ default: null
+ port:
+ description:
+        - which port the cluster is listening on
+ default: null
+ cluster_version:
+ description:
+ - which version the cluster should have
+ aliases: ['version']
+ choices: ['1.0']
+ default: null
+ allow_version_upgrade:
+ description:
+        - flag to determine if upgrade of version is possible
+ aliases: ['version_upgrade']
+ default: true
+ publicly_accessible:
+ description:
+ - if the cluster is accessible publicly or not
+ default: false
+ encrypted:
+ description:
+ - if the cluster is encrypted or not
+ default: false
+ elastic_ip:
+ description:
+ - if the cluster has an elastic IP or not
+ default: null
+ new_cluster_identifier:
+ description:
+ - Only used when command=modify.
+ aliases: ['new_identifier']
+ default: null
+ wait:
+ description:
+ - When command=create, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated.
+ default: "no"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+requirements: [ 'boto' ]
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+# Basic cluster provisioning example
+- redshift: >
+ command=create
+ node_type=ds1.xlarge
+ identifier=new_cluster
+ username=cluster_admin
+ password=1nsecure
+'''
+
+RETURN = '''
+cluster:
+ description: dictionary containing all the cluster information
+ returned: success
+ type: dictionary
+ contains:
+ identifier:
+ description: Id of the cluster.
+ returned: success
+ type: string
+ sample: "new_redshift_cluster"
+ create_time:
+ description: Time of the cluster creation as timestamp.
+ returned: success
+ type: float
+ sample: 1430158536.308
+ status:
+      description: Status of the cluster.
+ returned: success
+ type: string
+ sample: "available"
+ db_name:
+ description: Name of the database.
+ returned: success
+ type: string
+ sample: "new_db_name"
+ availability_zone:
+ description: Amazon availability zone where the cluster is located.
+ returned: success
+ type: string
+ sample: "us-east-1b"
+ maintenance_window:
+ description: Time frame when maintenance/upgrade are done.
+ returned: success
+ type: string
+ sample: "sun:09:30-sun:10:00"
+ private_ip_address:
+ description: Private IP address of the main node.
+ returned: success
+ type: string
+ sample: "10.10.10.10"
+ public_ip_address:
+ description: Public IP address of the main node.
+ returned: success
+ type: string
+ sample: "0.0.0.0"
+ port:
+ description: Port of the cluster.
+ returned: success
+ type: int
+ sample: 5439
+ url:
+ description: FQDN of the main cluster node.
+ returned: success
+ type: string
+ sample: "new-redshift_cluster.jfkdjfdkj.us-east-1.redshift.amazonaws.com"
+'''
+
+import time
+
+try:
+ import boto
+ from boto import redshift
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
+def _collect_facts(resource):
+    """Transform a raw Redshift cluster description into a flat facts dict."""
+    facts = {
+        'identifier'        : resource['ClusterIdentifier'],
+        'create_time'       : resource['ClusterCreateTime'],
+        'status'            : resource['ClusterStatus'],
+        'username'          : resource['MasterUsername'],
+        'db_name'           : resource['DBName'],
+        'availability_zone' : resource['AvailabilityZone'],
+        'maintenance_window': resource['PreferredMaintenanceWindow'],
+    }
+
+    # take the private IP of the shared/leader node (first match wins)
+    for node in resource['ClusterNodes']:
+        if node['NodeRole'] in ('SHARED', 'LEADER'):
+            facts['private_ip_address'] = node['PrivateIPAddress']
+            break
+
+    return facts
+
+
def create_cluster(module, redshift):
    """
    Create a new cluster (no-op when the identifier already exists).

    module: AnsibleModule object
    redshift: authenticated redshift connection object

    Returns: (changed, facts) tuple describing the cluster
    """

    identifier = module.params.get('identifier')
    node_type = module.params.get('node_type')
    username = module.params.get('username')
    password = module.params.get('password')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    changed = True
    # Package up the optional parameters accepted by boto's create_cluster().
    params = {}
    for p in ('db_name', 'cluster_type', 'cluster_security_groups',
              'vpc_security_group_ids', 'cluster_subnet_group_name',
              'availability_zone', 'preferred_maintenance_window',
              'cluster_parameter_group_name',
              'automated_snapshot_retention_period', 'port',
              'cluster_version', 'allow_version_upgrade',
              'number_of_nodes', 'publicly_accessible',
              'encrypted', 'elastic_ip'):
        if p in module.params:
            params[p] = module.params.get(p)

    try:
        # If the probe succeeds the cluster already exists: nothing to create.
        redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
        changed = False
    except boto.exception.JSONResponseError:
        # Cluster not found -- create it.
        try:
            redshift.create_cluster(identifier, node_type, username, password, **params)
        except boto.exception.JSONResponseError as e:
            module.fail_json(msg=str(e))

    try:
        resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    if wait:
        try:
            wait_timeout = time.time() + wait_timeout
            time.sleep(5)

            while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
                time.sleep(5)
                if wait_timeout <= time.time():
                    # BUG FIX: `resource` is a dict, so the old `resource.id`
                    # raised AttributeError instead of reporting the timeout.
                    module.fail_json(msg="Timeout waiting for resource %s" % resource['ClusterIdentifier'])

                resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

        except boto.exception.JSONResponseError as e:
            module.fail_json(msg=str(e))

    return(changed, _collect_facts(resource))
+
+
def describe_cluster(module, redshift):
    """
    Collect facts about an existing cluster.

    module: AnsibleModule object
    redshift: authenticated redshift connection object
    """
    identifier = module.params.get('identifier')

    try:
        response = redshift.describe_clusters(identifier)
        resource = response['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    return (True, _collect_facts(resource))
+
+
def delete_cluster(module, redshift):
    """
    Delete a cluster.

    module: AnsibleModule object
    redshift: authenticated redshift connection object
    """

    identifier = module.params.get('identifier')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    try:
        # BUG FIX: this called redshift.delete_custer() (typo), which raised
        # AttributeError, so deletion could never succeed.
        redshift.delete_cluster(identifier)
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    if wait:
        try:
            wait_timeout = time.time() + wait_timeout
            resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

            while wait_timeout > time.time() and resource['ClusterStatus'] != 'deleting':
                time.sleep(5)
                if wait_timeout <= time.time():
                    # BUG FIX: `resource` is a dict; `resource.id` raised
                    # AttributeError instead of reporting the timeout.
                    module.fail_json(msg="Timeout waiting for resource %s" % resource['ClusterIdentifier'])

                resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

        except boto.exception.JSONResponseError as e:
            module.fail_json(msg=str(e))

    return(True, {})
+
+
def modify_cluster(module, redshift):
    """
    Modify an existing cluster.

    module: AnsibleModule object
    redshift: authenticated redshift connection object
    """

    identifier = module.params.get('identifier')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')

    # Package up the optional parameters accepted by boto's modify_cluster().
    params = {}
    for p in ('cluster_type', 'cluster_security_groups',
              'vpc_security_group_ids', 'cluster_subnet_group_name',
              'availability_zone', 'preferred_maintenance_window',
              'cluster_parameter_group_name',
              'automated_snapshot_retention_period', 'port', 'cluster_version',
              'allow_version_upgrade', 'number_of_nodes', 'new_cluster_identifier'):
        if p in module.params:
            params[p] = module.params.get(p)

    # BUG FIX: the modify call used to sit inside the `except` branch of a
    # describe_clusters() probe, so it only ran when the cluster did NOT
    # exist (and then failed anyway).  Modify must target the existing
    # cluster; any API error is reported via fail_json.
    try:
        redshift.modify_cluster(identifier, **params)
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    # NOTE(review): if new_cluster_identifier was supplied, describing by the
    # old identifier may fail once the rename completes -- verify with AWS.
    try:
        resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    if wait:
        try:
            wait_timeout = time.time() + wait_timeout
            time.sleep(5)

            while wait_timeout > time.time() and resource['ClusterStatus'] != 'available':
                time.sleep(5)
                if wait_timeout <= time.time():
                    # `resource` is a dict; the old `resource.id` raised
                    # AttributeError instead of reporting the timeout.
                    module.fail_json(msg="Timeout waiting for resource %s" % resource['ClusterIdentifier'])

                resource = redshift.describe_clusters(identifier)['DescribeClustersResponse']['DescribeClustersResult']['Clusters'][0]

        except boto.exception.JSONResponseError as e:
            # https://github.com/boto/boto/issues/2776 is fixed.
            module.fail_json(msg=str(e))

    return(True, _collect_facts(resource))
+
+
def main():
    """Entry point: dispatch to create/facts/delete/modify per `command`."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        command=dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
        identifier=dict(required=True),
        node_type=dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge',
                                'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge',
                                'dw1.xlarge', 'dw1.8xlarge', 'dw2.large',
                                'dw2.8xlarge'], required=False),
        username=dict(required=False),
        password=dict(no_log=True, required=False),
        # BUG FIX: was `require=False` (typo) -- AnsibleModule silently
        # ignores unknown spec keys, so the intent was lost.
        db_name=dict(required=False),
        cluster_type=dict(choices=['multi-node', 'single-node', ], default='single-node'),
        cluster_security_groups=dict(aliases=['security_groups'], type='list'),
        vpc_security_group_ids=dict(aliases=['vpc_security_groups'], type='list'),
        cluster_subnet_group_name=dict(aliases=['subnet']),
        availability_zone=dict(aliases=['aws_zone', 'zone']),
        preferred_maintenance_window=dict(aliases=['maintance_window', 'maint_window']),
        cluster_parameter_group_name=dict(aliases=['param_group_name']),
        automated_snapshot_retention_period=dict(aliases=['retention_period']),
        port=dict(type='int'),
        cluster_version=dict(aliases=['version'], choices=['1.0']),
        allow_version_upgrade=dict(aliases=['version_upgrade'], type='bool', default=True),
        number_of_nodes=dict(type='int'),
        publicly_accessible=dict(type='bool', default=False),
        encrypted=dict(type='bool', default=False),
        elastic_ip=dict(required=False),
        new_cluster_identifier=dict(aliases=['new_identifier']),
        wait=dict(type='bool', default=False),
        # BUG FIX: wait_timeout is added to time.time(); without type='int'
        # a user-supplied value arrives as a string and raises TypeError.
        wait_timeout=dict(type='int', default=300),
    )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
    )

    if not HAS_BOTO:
        module.fail_json(msg='boto v2.9.0+ required for this module')

    command = module.params.get('command')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg=str("region not specified and unable to determine region from EC2_REGION."))

    # Connect to the Redshift endpoint.
    try:
        conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    changed = True
    if command == 'create':
        (changed, cluster) = create_cluster(module, conn)

    elif command == 'facts':
        (changed, cluster) = describe_cluster(module, conn)

    elif command == 'delete':
        (changed, cluster) = delete_cluster(module, conn)

    elif command == 'modify':
        (changed, cluster) = modify_cluster(module, conn)

    module.exit_json(changed=changed, cluster=cluster)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/redshift_subnet_group.py b/lib/ansible/modules/cloud/amazon/redshift_subnet_group.py
new file mode 100644
index 0000000000..cecf68209a
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/redshift_subnet_group.py
@@ -0,0 +1,186 @@
+#!/usr/bin/python
+
+# Copyright 2014 Jens Carl, Hothead Games Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+author:
+ - "Jens Carl (@j-carl), Hothead Games Inc."
+module: redshift_subnet_group
+version_added: "2.2"
+short_description: manage Redshift cluster subnet groups
+description:
+    - Creates, modifies, and deletes Redshift cluster subnet groups.
+options:
+ state:
+ description:
+ - Specifies whether the subnet should be present or absent.
+ default: 'present'
+ choices: ['present', 'absent' ]
+ group_name:
+ description:
+ - Cluster subnet group name.
+ required: true
+ aliases: ['name']
+ group_description:
+ description:
+ - Database subnet group description.
+ required: false
+ default: null
+ aliases: ['description']
+ group_subnets:
+ description:
+ - List of subnet IDs that make up the cluster subnet group.
+ required: false
+ default: null
+ aliases: ['subnets']
+requirements: [ 'boto' ]
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+# Create a Redshift subnet group
+- local_action:
+ module: redshift_subnet_group
+ state: present
+ group_name: redshift-subnet
+ group_description: Redshift subnet
+ group_subnets:
+ - 'subnet-aaaaa'
+ - 'subnet-bbbbb'
+
+# Remove subnet group
+redshift_subnet_group: >
+ state: absent
+ group_name: redshift-subnet
+'''
+
+RETURN = '''
+group:
+ description: dictionary containing all Redshift subnet group information
+ returned: success
+ type: dictionary
+ contains:
+ name:
+ description: name of the Redshift subnet group
+ returned: success
+ type: string
+ sample: "redshift_subnet_group_name"
+ vpc_id:
+ description: Id of the VPC where the subnet is located
+ returned: success
+ type: string
+ sample: "vpc-aabb1122"
+'''
+
+try:
+ import boto
+ import boto.redshift
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+
def main():
    """Create, modify or delete a Redshift cluster subnet group."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent']),
        group_name=dict(required=True, aliases=['name']),
        group_description=dict(required=False, aliases=['description']),
        group_subnets=dict(required=False, aliases=['subnets'], type='list'),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto v2.9.0+ required for this module')

    state = module.params.get('state')
    group_name = module.params.get('group_name')
    group_description = module.params.get('group_description')
    group_subnets = module.params.get('group_subnets')

    # Cross-parameter validation: description and subnets are mandatory when
    # creating/updating, and must not be supplied when deleting.
    if state == 'present':
        for required in ('group_name', 'group_description', 'group_subnets'):
            if not module.params.get(required):
                module.fail_json(msg=str("parameter %s required for state='present'" % required))
    else:
        for not_allowed in ('group_description', 'group_subnets'):
            if module.params.get(not_allowed):
                module.fail_json(msg=str("parameter %s not allowed for state='absent'" % not_allowed))

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg=str("region not specified and unable to determine region from EC2_REGION."))

    # Connect to the Redshift endpoint.
    try:
        conn = connect_to_aws(boto.redshift, region, **aws_connect_params)
    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    try:
        changed = False
        exists = False
        group = None

        try:
            # A missing group is signalled by the ClusterSubnetGroupNotFound
            # fault handled below, in which case `exists` stays False.
            # NOTE(review): matching_groups is a boto JSON-response dict, so
            # len() counts its top-level keys; the check is effectively "the
            # describe call succeeded" -- verify against boto's return shape.
            matching_groups = conn.describe_cluster_subnet_groups(group_name, max_records=100)
            exists = len(matching_groups) > 0
        except boto.exception.JSONResponseError as e:
            if e.body['Error']['Code'] != 'ClusterSubnetGroupNotFoundFault':
                module.fail_json(msg=str(e))

        if state == 'absent':
            # Delete only if the group is actually there (idempotence).
            if exists:
                conn.delete_cluster_subnet_group(group_name)
                changed = True

        else:
            if not exists:
                new_group = conn.create_cluster_subnet_group(group_name, group_description, group_subnets)
                group = {
                    'name': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
                    ['ClusterSubnetGroup']['ClusterSubnetGroupName'],
                    'vpc_id': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult']
                    ['ClusterSubnetGroup']['VpcId'],
                }
            else:
                changed_group = conn.modify_cluster_subnet_group(group_name, group_subnets, description=group_description)
                group = {
                    'name': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
                    ['ClusterSubnetGroup']['ClusterSubnetGroupName'],
                    'vpc_id': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult']
                    ['ClusterSubnetGroup']['VpcId'],
                }

            # Both the create and the modify path count as a change.
            changed = True

    except boto.exception.JSONResponseError as e:
        module.fail_json(msg=str(e))

    module.exit_json(changed=changed, group=group)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/route53_facts.py b/lib/ansible/modules/cloud/amazon/route53_facts.py
new file mode 100644
index 0000000000..6dad5e2164
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/route53_facts.py
@@ -0,0 +1,440 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: route53_facts
+short_description: Retrieves route53 details using AWS methods
+description:
+ - Gets various details related to Route53 zone, record set or health check details
+version_added: "2.0"
+options:
+ query:
+ description:
+ - specifies the query action to take
+ required: True
+ choices: [
+ 'change',
+ 'checker_ip_range',
+ 'health_check',
+ 'hosted_zone',
+ 'record_sets',
+ 'reusable_delegation_set',
+ ]
+ change_id:
+ description:
+ - The ID of the change batch request.
+ The value that you specify here is the value that
+ ChangeResourceRecordSets returned in the Id element
+ when you submitted the request.
+ required: false
+ hosted_zone_id:
+ description:
+ - The Hosted Zone ID of the DNS zone
+ required: false
+ max_items:
+ description:
+ - Maximum number of items to return for various get/list requests
+ required: false
+ next_marker:
+ description:
+ - "Some requests such as list_command: hosted_zones will return a maximum
+ number of entries - EG 100. If the number of entries exceeds this maximum
+ another request can be sent using the NextMarker entry from the first response
+ to get the next page of results"
+ required: false
+ delegation_set_id:
+ description:
+ - The DNS Zone delegation set ID
+ required: false
+ start_record_name:
+ description:
+ - "The first name in the lexicographic ordering of domain names that you want
+ the list_command: record_sets to start listing from"
+ required: false
+ type:
+ description:
+ - The type of DNS record
+ required: false
+ choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS' ]
+ dns_name:
+ description:
+ - The first name in the lexicographic ordering of domain names that you want
+ the list_command to start listing from
+ required: false
+ resource_id:
+ description:
+ - The ID/s of the specified resource/s
+ required: false
+ aliases: ['resource_ids']
+ health_check_id:
+ description:
+ - The ID of the health check
+ required: false
+ hosted_zone_method:
+ description:
+ - "This is used in conjunction with query: hosted_zone.
+ It allows for listing details, counts or tags of various
+ hosted zone details."
+ required: false
+ choices: [
+ 'details',
+ 'list',
+ 'list_by_name',
+ 'count',
+ 'tags',
+ ]
+ default: 'list'
+ health_check_method:
+ description:
+ - "This is used in conjunction with query: health_check.
+ It allows for listing details, counts or tags of various
+ health check details."
+ required: false
+ choices: [
+ 'list',
+ 'details',
+ 'status',
+ 'failure_reason',
+ 'count',
+ 'tags',
+ ]
+ default: 'list'
+author: Karen Cheng (@Etherdaemon)
+extends_documentation_fragment: aws
+'''
+
+EXAMPLES = '''
+# Simple example of listing all hosted zones
+- name: List all hosted zones
+ route53_facts:
+ query: hosted_zone
+ register: hosted_zones
+
+# Getting a count of hosted zones
+- name: Return a count of all hosted zones
+ route53_facts:
+ query: hosted_zone
+ hosted_zone_method: count
+ register: hosted_zone_count
+
+- name: List the first 20 resource record sets in a given hosted zone
+ route53_facts:
+ profile: account_name
+ query: record_sets
+ hosted_zone_id: ZZZ1111112222
+ max_items: 20
+ register: record_sets
+
+- name: List first 20 health checks
+ route53_facts:
+ query: health_check
+ health_check_method: list
+ max_items: 20
+ register: health_checks
+
+- name: Get health check last failure_reason
+ route53_facts:
+ query: health_check
+ health_check_method: failure_reason
+ health_check_id: 00000000-1111-2222-3333-12345678abcd
+ register: health_check_failure_reason
+
+- name: Retrieve reusable delegation set details
+ route53_facts:
+ query: reusable_delegation_set
+ delegation_set_id: delegation id
+ register: delegation_sets
+
+'''
+try:
+ import boto
+ import botocore
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
+
+
def get_hosted_zone(client, module):
    """Fetch details of a single hosted zone by its ID (required)."""
    zone_id = module.params.get('hosted_zone_id')
    if not zone_id:
        module.fail_json(msg="Hosted Zone Id is required")

    return client.get_hosted_zone(Id=zone_id)
+
+
def reusable_delegation_set_details(client, module):
    """Get one reusable delegation set by ID, or list them all (paged)."""
    set_id = module.params.get('delegation_set_id')
    if set_id:
        return client.get_reusable_delegation_set(DelegationSetId=set_id)

    params = dict()
    max_items = module.params.get('max_items')
    if max_items:
        params['MaxItems'] = max_items
    marker = module.params.get('next_marker')
    if marker:
        params['Marker'] = marker

    return client.list_reusable_delegation_sets(**params)
+
+
def list_hosted_zones(client, module):
    """List hosted zones, honouring paging and delegation-set filters."""
    optional = (
        ('max_items', 'MaxItems'),
        ('next_marker', 'Marker'),
        ('delegation_set_id', 'DelegationSetId'),
    )
    params = {api_key: module.params.get(opt)
              for opt, api_key in optional if module.params.get(opt)}

    return client.list_hosted_zones(**params)
+
+
def list_hosted_zones_by_name(client, module):
    """List hosted zones in name order, optionally starting at a DNS name."""
    optional = (
        ('hosted_zone_id', 'HostedZoneId'),
        ('dns_name', 'DNSName'),
        ('max_items', 'MaxItems'),
    )
    params = {api_key: module.params.get(opt)
              for opt, api_key in optional if module.params.get(opt)}

    return client.list_hosted_zones_by_name(**params)
+
+
def change_details(client, module):
    """Fetch the status of a change batch request by its ID (required)."""
    change_id = module.params.get('change_id')
    if not change_id:
        module.fail_json(msg="change_id is required")

    return client.get_change(Id=change_id)
+
+
def checker_ip_range_details(client, module):
    """Return the Route53 health-checker IP ranges.

    `module` is unused but kept so all query handlers share one signature.
    """
    return client.get_checker_ip_ranges()
+
+
def get_count(client, module):
    """Return the health-check or hosted-zone count depending on `query`."""
    if module.params.get('query') == 'health_check':
        return client.get_health_check_count()
    return client.get_hosted_zone_count()
+
+
def get_health_check(client, module):
    """Fetch details, status or last failure reason for one health check."""
    check_id = module.params.get('health_check_id')
    if not check_id:
        module.fail_json(msg="health_check_id is required")

    method = module.params.get('health_check_method')
    if method == 'details':
        results = client.get_health_check(HealthCheckId=check_id)
    elif method == 'failure_reason':
        results = client.get_health_check_last_failure_reason(HealthCheckId=check_id)
    elif method == 'status':
        results = client.get_health_check_status(HealthCheckId=check_id)

    return results
+
+
def get_resource_tags(client, module):
    """List tags for the given hosted-zone or health-check resource IDs."""
    resource_ids = module.params.get('resource_id')
    if not resource_ids:
        module.fail_json(msg="resource_id or resource_ids is required")

    if module.params.get('query') == 'health_check':
        resource_type = 'healthcheck'
    else:
        resource_type = 'hostedzone'

    return client.list_tags_for_resources(ResourceIds=resource_ids,
                                          ResourceType=resource_type)
+
+
def list_health_checks(client, module):
    """List health checks, honouring MaxItems/Marker paging parameters."""
    optional = (
        ('max_items', 'MaxItems'),
        ('next_marker', 'Marker'),
    )
    params = {api_key: module.params.get(opt)
              for opt, api_key in optional if module.params.get(opt)}

    return client.list_health_checks(**params)
+
+
def record_sets_details(client, module):
    """List resource record sets of a hosted zone (zone ID is required)."""
    zone_id = module.params.get('hosted_zone_id')
    if not zone_id:
        module.fail_json(msg="Hosted Zone Id is required")

    params = {'HostedZoneId': zone_id}

    if module.params.get('max_items'):
        params['MaxItems'] = module.params.get('max_items')

    start_name = module.params.get('start_record_name')
    if start_name:
        params['StartRecordName'] = start_name

    record_type = module.params.get('type')
    if record_type:
        # The API only accepts StartRecordType alongside StartRecordName.
        if not start_name:
            module.fail_json(msg="start_record_name must be specified if type is set")
        params['StartRecordType'] = record_type

    return client.list_resource_record_sets(**params)
+
+
def health_check_details(client, module):
    """Dispatch the requested health_check_method to its handler."""
    dispatch = {
        'list': list_health_checks,
        'details': get_health_check,
        'status': get_health_check,
        'failure_reason': get_health_check,
        'count': get_count,
        'tags': get_resource_tags,
    }
    handler = dispatch[module.params.get('health_check_method')]
    return handler(client, module)
+
+
def hosted_zone_details(client, module):
    """Dispatch the requested hosted_zone_method to its handler."""
    dispatch = {
        'details': get_hosted_zone,
        'list': list_hosted_zones,
        'list_by_name': list_hosted_zones_by_name,
        'count': get_count,
        'tags': get_resource_tags,
    }
    handler = dispatch[module.params.get('hosted_zone_method')]
    return handler(client, module)
+
+
def main():
    """Entry point: build the spec, connect to Route53, dispatch the query."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        query=dict(choices=[
            'change',
            'checker_ip_range',
            'health_check',
            'hosted_zone',
            'record_sets',
            'reusable_delegation_set',
        ], required=True),
        change_id=dict(),
        hosted_zone_id=dict(),
        max_items=dict(type='str'),
        next_marker=dict(),
        delegation_set_id=dict(),
        start_record_name=dict(),
        type=dict(choices=[
            'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'
        ]),
        dns_name=dict(),
        resource_id=dict(type='list', aliases=['resource_ids']),
        health_check_id=dict(),
        hosted_zone_method=dict(choices=[
            'details',
            'list',
            'list_by_name',
            'count',
            'tags'
        ], default='list'),
        health_check_method=dict(choices=[
            'list',
            'details',
            'status',
            'failure_reason',
            'count',
            'tags',
        ], default='list'),
    )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['hosted_zone_method', 'health_check_method'],
        ],
    )

    # Validate requirements.
    # BUG FIX: boto3 is mandatory (boto3_conn below uses it) and the except
    # clause references the `boto` name, so both libraries must be present;
    # the old check accepted either one alone and crashed later.
    if not (HAS_BOTO and HAS_BOTO3):
        module.fail_json(msg='boto and boto3 are required for this module')

    try:
        region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
        route53 = boto3_conn(module, conn_type='client', resource='route53', region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg="Can't authorize connection - %s " % str(e))

    # Dispatch table: query name -> handler function.
    invocations = {
        'change': change_details,
        'checker_ip_range': checker_ip_range_details,
        'health_check': health_check_details,
        'hosted_zone': hosted_zone_details,
        'record_sets': record_sets_details,
        'reusable_delegation_set': reusable_delegation_set_details,
    }
    results = invocations[module.params.get('query')](route53, module)

    module.exit_json(**results)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/route53_health_check.py b/lib/ansible/modules/cloud/amazon/route53_health_check.py
new file mode 100644
index 0000000000..0070b3e288
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/route53_health_check.py
@@ -0,0 +1,364 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: route53_health_check
+short_description: add or delete health-checks in Amazon's Route53 DNS service
+description:
+ - Creates and deletes DNS Health checks in Amazon's Route53 service
+ - Only the port, resource_path, string_match and request_interval are
+ considered when updating existing health-checks.
+version_added: "2.0"
+options:
+ state:
+ description:
+ - Specifies the action to take.
+ required: true
+ choices: [ 'present', 'absent' ]
+ ip_address:
+ description:
+ - IP address of the end-point to check. Either this or `fqdn` has to be
+ provided.
+ required: false
+ default: null
+ port:
+ description:
+ - The port on the endpoint on which you want Amazon Route 53 to perform
+ health checks. Required for TCP checks.
+ required: false
+ default: null
+ type:
+ description:
+ - The type of health check that you want to create, which indicates how
+ Amazon Route 53 determines whether an endpoint is healthy.
+ required: true
+ choices: [ 'HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP' ]
+ resource_path:
+ description:
+ - The path that you want Amazon Route 53 to request when performing
+ health checks. The path can be any value for which your endpoint will
+ return an HTTP status code of 2xx or 3xx when the endpoint is healthy,
+ for example the file /docs/route53-health-check.html.
+ - Required for all checks except TCP.
+ - The path must begin with a /
+ - Maximum 255 characters.
+ required: false
+ default: null
+ fqdn:
+ description:
+ - Domain name of the endpoint to check. Either this or `ip_address` has
+ to be provided. When both are given the `fqdn` is used in the `Host:`
+ header of the HTTP request.
+ required: false
+ string_match:
+ description:
+ - If the check type is HTTP_STR_MATCH or HTTPS_STR_MATCH, the string
+ that you want Amazon Route 53 to search for in the response body from
+ the specified resource. If the string appears in the first 5120 bytes
+ of the response body, Amazon Route 53 considers the resource healthy.
+ required: false
+ default: null
+ request_interval:
+ description:
+ - The number of seconds between the time that Amazon Route 53 gets a
+ response from your endpoint and the time that it sends the next
+ health-check request.
+ required: false
+ default: 30
+ choices: [ 10, 30 ]
+ failure_threshold:
+ description:
+ - The number of consecutive health checks that an endpoint must pass or
+ fail for Amazon Route 53 to change the current status of the endpoint
+ from unhealthy to healthy or vice versa.
+ required: false
+ default: 3
+ choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
+author: "zimbatm (@zimbatm)"
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Create a health-check for host1.example.com and use it in record
+- route53_health_check:
+ state: present
+ fqdn: host1.example.com
+ type: HTTP_STR_MATCH
+ resource_path: /
+ string_match: "Hello"
+ request_interval: 10
+ failure_threshold: 2
+ register: my_health_check
+
+- route53:
+ action: create
+ zone: "example.com"
+ type: CNAME
+ record: "www.example.com"
+ value: host1.example.com
+ ttl: 30
+ # Routing policy
+ identifier: "host1@www"
+ weight: 100
+ health_check: "{{ my_health_check.health_check.id }}"
+
+# Delete health-check
+- route53_health_check:
+ state: absent
+ fqdn: host1.example.com
+
+'''
+
+import uuid
+
+try:
+ import boto
+ import boto.ec2
+ from boto import route53
+ from boto.route53 import Route53Connection, exception
+ from boto.route53.healthcheck import HealthCheck
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
+
+
+# Things that can't get changed:
+# protocol
+# ip_address or domain
+# request_interval
+# string_match if not previously enabled
def find_health_check(conn, wanted):
    """Searches for health checks that have the exact same set of immutable values"""
    # Two checks are "the same" when every immutable field matches: IP
    # address, FQDN, type and request interval. Mutable fields (port,
    # resource path, string match, failure threshold) are deliberately
    # ignored here; they are reconciled later via update_health_check.
    # NOTE: the Route53 API returns RequestInterval as a string, hence str().
    for check in conn.get_list_health_checks().HealthChecks:
        config = check.HealthCheckConfig
        if config.get('IPAddress') == wanted.ip_addr and config.get('FullyQualifiedDomainName') == wanted.fqdn and config.get('Type') == wanted.hc_type and config.get('RequestInterval') == str(wanted.request_interval):
            return check
    return None
+
def to_health_check(config):
    """Build a boto HealthCheck object from a Route53 HealthCheckConfig mapping.

    RequestInterval and FailureThreshold come back from the API as strings,
    so they are coerced to int to compare cleanly against module parameters.
    """
    return HealthCheck(
        config.get('IPAddress'),
        config.get('Port'),
        config.get('Type'),
        config.get('ResourcePath'),
        fqdn=config.get('FullyQualifiedDomainName'),
        string_match=config.get('SearchString'),
        request_interval=int(config.get('RequestInterval')),
        failure_threshold=int(config.get('FailureThreshold')),
    )
+
def health_check_diff(a, b):
    """Return the attributes of *b* whose values differ from *a*.

    Compares the instance __dict__ of both objects; the result maps each
    differing attribute name to the value found on *b* (or None when *b*
    lacks the attribute). An empty dict means the objects are identical.
    """
    left = a.__dict__
    right = b.__dict__
    return {
        attr: right.get(attr)
        for attr in set(left) | set(right)
        if left.get(attr) != right.get(attr)
    }
+
def to_template_params(health_check):
    """Map a HealthCheck object onto the substitution dict for the XML templates."""
    params = {
        'ip_addr_part': '',
        'port': health_check.port,
        'type': health_check.hc_type,
        'resource_path_part': '',
        'fqdn_part': '',
        'string_match_part': '',
        'request_interval': health_check.request_interval,
        'failure_threshold': health_check.failure_threshold,
    }
    # Optional XML fragments are emitted only when the corresponding value
    # is set; otherwise the placeholder stays an empty string.
    if health_check.ip_addr:
        params['ip_addr_part'] = HealthCheck.XMLIpAddrPart % {'ip_addr': health_check.ip_addr}
    if health_check.resource_path:
        # Uses the module-level XMLResourcePathPart template (boto's
        # HealthCheck class does not provide this fragment itself).
        params['resource_path_part'] = XMLResourcePathPart % {'resource_path': health_check.resource_path}
    if health_check.fqdn:
        params['fqdn_part'] = HealthCheck.XMLFQDNPart % {'fqdn': health_check.fqdn}
    if health_check.string_match:
        params['string_match_part'] = HealthCheck.XMLStringMatchPart % {'string_match': health_check.string_match}
    return params
+
# XML fragment for the optional <ResourcePath> element; boto's HealthCheck
# class does not ship this template, hence the module-level definition.
XMLResourcePathPart = """<ResourcePath>%(resource_path)s</ResourcePath>"""

# Request body for CreateHealthCheck (POST /<version>/healthcheck).
POSTXMLBody = """
    <CreateHealthCheckRequest xmlns="%(xmlns)s">
        <CallerReference>%(caller_ref)s</CallerReference>
        <HealthCheckConfig>
            %(ip_addr_part)s
            <Port>%(port)s</Port>
            <Type>%(type)s</Type>
            %(resource_path_part)s
            %(fqdn_part)s
            %(string_match_part)s
            <RequestInterval>%(request_interval)s</RequestInterval>
            <FailureThreshold>%(failure_threshold)s</FailureThreshold>
        </HealthCheckConfig>
    </CreateHealthCheckRequest>
    """

# Request body for UpdateHealthCheck (POST /<version>/healthcheck/<id>).
# Note the %i conversion for failure_threshold: it must be an integer.
UPDATEHCXMLBody = """
    <UpdateHealthCheckRequest xmlns="%(xmlns)s">
        <HealthCheckVersion>%(health_check_version)s</HealthCheckVersion>
        %(ip_addr_part)s
        <Port>%(port)s</Port>
        %(resource_path_part)s
        %(fqdn_part)s
        %(string_match_part)s
        <FailureThreshold>%(failure_threshold)i</FailureThreshold>
    </UpdateHealthCheckRequest>
    """
+
def create_health_check(conn, health_check, caller_ref = None):
    """POST a CreateHealthCheck request to Route53 and return the parsed response.

    A fresh UUID is generated as the caller reference when none is supplied,
    so each call is a distinct (non-idempotent) create request. Raises
    boto's DNSServerError when the API does not answer with HTTP 201.
    """
    if caller_ref is None:
        caller_ref = str(uuid.uuid4())
    uri = '/%s/healthcheck' % conn.Version
    params = to_template_params(health_check)
    params.update(xmlns=conn.XMLNameSpace, caller_ref=caller_ref)

    xml_body = POSTXMLBody % params
    response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
    body = response.read()
    boto.log.debug(body)
    if response.status == 201:
        # Parse the XML response into a boto jsonresponse Element so callers
        # can access e.HealthCheck.Id etc.
        e = boto.jsonresponse.Element()
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e
    else:
        raise exception.DNSServerError(response.status, response.reason, body)
+
def update_health_check(conn, health_check_id, health_check_version, health_check):
    """POST an UpdateHealthCheck request for an existing health check.

    health_check_version is echoed into the request body as required by the
    Route53 API. Raises boto's DNSServerError on any status other than
    200/204; otherwise returns the parsed XML response.
    """
    uri = '/%s/healthcheck/%s' % (conn.Version, health_check_id)
    params = to_template_params(health_check)
    params.update(
        xmlns=conn.XMLNameSpace,
        health_check_version=health_check_version,
    )
    xml_body = UPDATEHCXMLBody % params
    response = conn.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
    body = response.read()
    boto.log.debug(body)
    if response.status not in (200, 204):
        raise exception.DNSServerError(response.status,
                                       response.reason,
                                       body)
    e = boto.jsonresponse.Element()
    h = boto.jsonresponse.XmlHandler(e, None)
    h.parse(body)
    return e
+
def main():
    """Entry point: create, update or delete a Route53 health check."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(choices=['present', 'absent'], default='present'),
        ip_address=dict(),
        port=dict(type='int'),
        type=dict(required=True, choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']),
        resource_path=dict(),
        fqdn=dict(),
        string_match=dict(),
        request_interval=dict(type='int', choices=[10, 30], default=30),
        failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10], default=3),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto 2.27.0+ required for this module')

    state_in = module.params.get('state')
    ip_addr_in = module.params.get('ip_address')
    port_in = module.params.get('port')
    type_in = module.params.get('type')
    resource_path_in = module.params.get('resource_path')
    fqdn_in = module.params.get('fqdn')
    string_match_in = module.params.get('string_match')
    request_interval_in = module.params.get('request_interval')
    failure_threshold_in = module.params.get('failure_threshold')

    # At least one endpoint identifier is mandatory.
    if ip_addr_in is None and fqdn_in is None:
        module.fail_json(msg="parameter 'ip_address' or 'fqdn' is required")

    # Default the port from the protocol; TCP has no sensible default.
    if port_in is None:
        if type_in in ['HTTP', 'HTTP_STR_MATCH']:
            port_in = 80
        elif type_in in ['HTTPS', 'HTTPS_STR_MATCH']:
            port_in = 443
        else:
            module.fail_json(msg="parameter 'port' is required for 'type' TCP")

    # string_match is required by, and only valid for, the *_STR_MATCH types.
    if type_in in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']:
        if string_match_in is None:
            module.fail_json(msg="parameter 'string_match' is required for the HTTP(S)_STR_MATCH types")
        elif len(string_match_in) > 255:
            module.fail_json(msg="parameter 'string_match' is limited to 255 characters max")
    elif string_match_in:
        module.fail_json(msg="parameter 'string_match' argument is only for the HTTP(S)_STR_MATCH types")

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
    # connect to the route53 endpoint
    try:
        conn = Route53Connection(**aws_connect_kwargs)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg=e.error_message)

    changed = False
    action = None
    check_id = None
    wanted_config = HealthCheck(ip_addr_in, port_in, type_in, resource_path_in, fqdn_in, string_match_in, request_interval_in, failure_threshold_in)
    existing_check = find_health_check(conn, wanted_config)
    if existing_check:
        check_id = existing_check.Id
        existing_config = to_health_check(existing_check.HealthCheckConfig)

    if state_in == 'present':
        if existing_check is None:
            action = "create"
            check_id = create_health_check(conn, wanted_config).HealthCheck.Id
            changed = True
        else:
            # BUGFIX: only issue an update when the existing configuration
            # actually differs from the requested one. The condition was
            # previously inverted ('if not diff:'), which updated (and
            # reported changed) on every run where nothing had changed, and
            # skipped the update when something had.
            diff = health_check_diff(existing_config, wanted_config)
            if diff:
                action = "update"
                update_health_check(conn, existing_check.Id, int(existing_check.HealthCheckVersion), wanted_config)
                changed = True
    elif state_in == 'absent':
        if check_id:
            action = "delete"
            conn.delete_health_check(check_id)
            changed = True
    else:
        module.fail_json(msg="Logic Error: Unknown state")

    module.exit_json(changed=changed, health_check=dict(id=check_id), action=action)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/route53_zone.py b/lib/ansible/modules/cloud/amazon/route53_zone.py
new file mode 100644
index 0000000000..758860f685
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/route53_zone.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: route53_zone
+short_description: add or delete Route53 zones
+description:
+ - Creates and deletes Route53 private and public zones
+version_added: "2.0"
+options:
+ zone:
+ description:
+ - "The DNS zone record (eg: foo.com.)"
+ required: true
+ state:
+ description:
+ - whether or not the zone should exist or not
+ required: false
+ default: present
+ choices: [ "present", "absent" ]
+ vpc_id:
+ description:
+ - The VPC ID the zone should be a part of (if this is going to be a private zone)
+ required: false
+ default: null
+ vpc_region:
+ description:
+ - The VPC Region the zone should be a part of (if this is going to be a private zone)
+ required: false
+ default: null
+ comment:
+ description:
+ - Comment associated with the zone
+ required: false
+ default: ''
+extends_documentation_fragment:
+ - aws
+ - ec2
+author: "Christopher Troup (@minichate)"
+'''
+
+EXAMPLES = '''
+# create a public zone
+- route53_zone:
+ zone: example.com
+ state: present
+ comment: this is an example
+
+# delete a public zone
+- route53_zone:
+ zone: example.com
+ state: absent
+
+- name: private zone for devel
+ route53_zone:
+ zone: devel.example.com
+ state: present
+ vpc_id: '{{ myvpc_id }}'
+ comment: developer domain
+
+# more complex example
+- name: register output after creating zone in parameterized region
+ route53_zone:
+ vpc_id: '{{ vpc.vpc_id }}'
+ vpc_region: '{{ ec2_region }}'
+ zone: '{{ vpc_dns_zone }}'
+ state: present
+ register: zone_out
+
+- debug:
+ var: zone_out
+'''
+
+RETURN='''
+comment:
+ description: optional hosted zone comment
+ returned: when hosted zone exists
+ type: string
+ sample: "Private zone"
+name:
+ description: hosted zone name
+ returned: when hosted zone exists
+ type: string
+ sample: "private.local."
+private_zone:
+ description: whether hosted zone is private or public
+ returned: when hosted zone exists
+ type: bool
+ sample: true
+vpc_id:
+ description: id of vpc attached to private hosted zone
+ returned: for private hosted zone
+ type: string
+ sample: "vpc-1d36c84f"
+vpc_region:
+ description: region of vpc attached to private hosted zone
+ returned: for private hosted zone
+ type: string
+ sample: "eu-west-1"
+zone_id:
+ description: hosted zone id
+ returned: when hosted zone exists
+ type: string
+ sample: "Z6JQG9820BEFMW"
+'''
+
+try:
+ import boto
+ import boto.ec2
+ from boto import route53
+ from boto.route53 import Route53Connection
+ from boto.route53.zone import Zone
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info
+
+
def main():
    """Entry point: create or delete a public or private Route53 hosted zone."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        zone=dict(required=True),
        state=dict(default='present', choices=['present', 'absent']),
        vpc_id=dict(default=None),
        vpc_region=dict(default=None),
        comment=dict(default='')))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    zone_in = module.params.get('zone').lower()
    state = module.params.get('state').lower()
    vpc_id = module.params.get('vpc_id')
    vpc_region = module.params.get('vpc_region')
    comment = module.params.get('comment')

    # Route53 zone names are fully qualified: ensure a trailing dot.
    if zone_in[-1:] != '.':
        zone_in += "."

    # A zone is private only when both the VPC id and region are supplied.
    private_zone = vpc_id is not None and vpc_region is not None

    _, _, aws_connect_kwargs = get_aws_connection_info(module)

    # connect to the route53 endpoint
    try:
        conn = Route53Connection(**aws_connect_kwargs)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg=e.error_message)

    results = conn.get_all_hosted_zones()
    zones = {}

    # Build a name -> zone_id map. When a VPC id was given, only zones
    # attached to that VPC are considered a match.
    for r53zone in results['ListHostedZonesResponse']['HostedZones']:
        zone_id = r53zone['Id'].replace('/hostedzone/', '')
        zone_details = conn.get_hosted_zone(zone_id)['GetHostedZoneResponse']
        if vpc_id and 'VPCs' in zone_details:
            # this is to deal with this boto bug: https://github.com/boto/boto/pull/2882
            if isinstance(zone_details['VPCs'], dict):
                if zone_details['VPCs']['VPC']['VPCId'] == vpc_id:
                    zones[r53zone['Name']] = zone_id
            else:  # Forward compatibility for when boto fixes that bug
                if vpc_id in [v['VPCId'] for v in zone_details['VPCs']]:
                    zones[r53zone['Name']] = zone_id
        else:
            zones[r53zone['Name']] = zone_id

    record = {
        'private_zone': private_zone,
        'vpc_id': vpc_id,
        'vpc_region': vpc_region,
        'comment': comment,
    }

    if state == 'present' and zone_in in zones:
        # Zone already exists. For private zones, verify the VPC binding has
        # not changed: Route53 cannot move a zone between VPCs or convert a
        # public zone to private in place.
        if private_zone:
            details = conn.get_hosted_zone(zones[zone_in])

            if 'VPCs' not in details['GetHostedZoneResponse']:
                module.fail_json(
                    msg="Can't change VPC from public to private"
                )

            vpc_details = details['GetHostedZoneResponse']['VPCs']['VPC']
            current_vpc_id = vpc_details['VPCId']
            current_vpc_region = vpc_details['VPCRegion']

            if current_vpc_id != vpc_id:
                module.fail_json(
                    msg="Can't change VPC ID once a zone has been created"
                )
            if current_vpc_region != vpc_region:
                module.fail_json(
                    msg="Can't change VPC Region once a zone has been created"
                )

        record['zone_id'] = zones[zone_in]
        record['name'] = zone_in
        module.exit_json(changed=False, set=record)

    elif state == 'present':
        result = conn.create_hosted_zone(zone_in, **record)
        hosted_zone = result['CreateHostedZoneResponse']['HostedZone']
        zone_id = hosted_zone['Id'].replace('/hostedzone/', '')
        record['zone_id'] = zone_id
        record['name'] = zone_in
        module.exit_json(changed=True, set=record)

    elif state == 'absent' and zone_in in zones:
        conn.delete_hosted_zone(zones[zone_in])
        module.exit_json(changed=True)

    elif state == 'absent':
        module.exit_json(changed=False)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/s3_bucket.py b/lib/ansible/modules/cloud/amazon/s3_bucket.py
new file mode 100644
index 0000000000..970967e30b
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/s3_bucket.py
@@ -0,0 +1,437 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: s3_bucket
+short_description: Manage S3 buckets in AWS, Ceph, Walrus and FakeS3
+description:
+ - Manage S3 buckets in AWS, Ceph, Walrus and FakeS3
+version_added: "2.0"
+author: "Rob White (@wimnat)"
+options:
+ force:
+ description:
+ - When trying to delete a bucket, delete all keys in the bucket first (an s3 bucket must be empty for a successful deletion)
+ required: false
+ default: no
+ choices: [ 'yes', 'no' ]
+ name:
+ description:
+ - Name of the s3 bucket
+ required: true
+ default: null
+ policy:
+ description:
+ - The JSON policy as a string.
+ required: false
+ default: null
+ s3_url:
+ description:
+ - S3 URL endpoint for usage with Ceph, Eucalyptus, fakes3, etc. Otherwise assumes AWS
+ default: null
+ aliases: [ S3_URL ]
+ ceph:
+ description:
+ - Enable API compatibility with Ceph. It takes into account the S3 API subset working with Ceph in order to provide the same module behaviour where possible.
+ version_added: "2.2"
+ requester_pays:
+ description:
+ - With Requester Pays buckets, the requester instead of the bucket owner pays the cost of the request and the data download from the bucket.
+ required: false
+ default: no
+ choices: [ 'yes', 'no' ]
+ state:
+ description:
+ - Create or remove the s3 bucket
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ tags:
+ description:
+ - tags dict to apply to bucket
+ required: false
+ default: null
+ versioning:
+ description:
+ - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
+ required: false
+ default: null
+ choices: [ 'yes', 'no' ]
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Create a simple s3 bucket
+- s3_bucket:
+ name: mys3bucket
+
+# Create a simple s3 bucket on Ceph Rados Gateway
+- s3_bucket:
+ name: mys3bucket
+ s3_url: http://your-ceph-rados-gateway-server.xxx
+ ceph: true
+
+# Remove an s3 bucket and any keys it contains
+- s3_bucket:
+ name: mys3bucket
+ state: absent
+ force: yes
+
+# Create a bucket, add a policy from a file, enable requester pays, enable versioning and tag
+- s3_bucket:
+ name: mys3bucket
+ policy: "{{ lookup('file','policy.json') }}"
+ requester_pays: yes
+ versioning: yes
+ tags:
+ example: tag1
+ another: tag2
+
+'''
+
+import os
+import xml.etree.ElementTree as ET
+import urlparse
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
try:
    import boto.ec2
    from boto.s3.connection import OrdinaryCallingFormat, Location, S3Connection
    from boto.s3.tagging import Tags, TagSet
    from boto.exception import BotoServerError, S3CreateError, S3ResponseError
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False
+
+
def get_request_payment_status(bucket):
    """Return True when the bucket's payer is 'Requester' (requester-pays on).

    Parses the XML body returned by GET ?requestPayment on the bucket.
    The response is expected to contain a single Payer element; the loop
    keeps the last match found.
    """
    response = bucket.get_request_payment()
    root = ET.fromstring(response)
    for message in root.findall('.//{http://s3.amazonaws.com/doc/2006-03-01/}Payer'):
        payer = message.text

    return (payer != "BucketOwner")
+
+
def create_tags_container(tags):
    """Wrap a plain dict of tag names/values in boto's Tags/TagSet container."""
    container = Tags()
    tag_set = TagSet()
    for tag_name, tag_value in tags.iteritems():
        tag_set.add_tag(tag_name, tag_value)
    container.add_tag_set(tag_set)
    return container
+
+
def _create_or_update_bucket(connection, module, location):
    """Ensure the bucket exists on AWS and reconcile versioning,
    requester-pays, policy and tags with the module parameters.

    Exits the module via exit_json/fail_json; never returns normally.
    """
    policy = module.params.get("policy")
    name = module.params.get("name")
    requester_pays = module.params.get("requester_pays")
    tags = module.params.get("tags")
    versioning = module.params.get("versioning")
    changed = False

    try:
        bucket = connection.get_bucket(name)
    except S3ResponseError as e:
        # Bucket missing (or inaccessible) - try to create it.
        try:
            bucket = connection.create_bucket(name, location=location)
            changed = True
        except S3CreateError as e:
            module.fail_json(msg=e.message)

    # Versioning
    # NOTE(review): boto returns an empty dict for a bucket that has never
    # had versioning configured, so this outer guard appears to skip
    # enabling versioning on a brand-new bucket - TODO confirm.
    versioning_status = bucket.get_versioning_status()
    if versioning_status:
        if versioning is not None:
            if versioning and versioning_status['Versioning'] != "Enabled":
                try:
                    bucket.configure_versioning(versioning)
                    changed = True
                    versioning_status = bucket.get_versioning_status()
                except S3ResponseError as e:
                    module.fail_json(msg=e.message)
            # BUGFIX: only suspend versioning when it is currently enabled.
            # The previous condition ('!= "Enabled"') re-suspended buckets
            # whose versioning was already off and reported a bogus change
            # on every run.
            elif not versioning and versioning_status['Versioning'] == "Enabled":
                try:
                    bucket.configure_versioning(versioning)
                    changed = True
                    versioning_status = bucket.get_versioning_status()
                except S3ResponseError as e:
                    module.fail_json(msg=e.message)

    # Requester pays
    requester_pays_status = get_request_payment_status(bucket)
    if requester_pays_status != requester_pays:
        if requester_pays:
            payer = 'Requester'
        else:
            payer = 'BucketOwner'
        bucket.set_request_payment(payer=payer)
        changed = True
        requester_pays_status = get_request_payment_status(bucket)

    # Policy
    try:
        current_policy = json.loads(bucket.get_policy())
    except S3ResponseError as e:
        if e.error_code == "NoSuchBucketPolicy":
            current_policy = {}
        else:
            module.fail_json(msg=e.message)
    if policy is not None:
        if isinstance(policy, basestring):
            policy = json.loads(policy)

        if not policy:
            bucket.delete_policy()
            # only show changed if there was already a policy
            changed = bool(current_policy)
        elif current_policy != policy:
            try:
                bucket.set_policy(json.dumps(policy))
                changed = True
                current_policy = json.loads(bucket.get_policy())
            except S3ResponseError as e:
                module.fail_json(msg=e.message)

    # Tags
    try:
        current_tags = bucket.get_tags()
    except S3ResponseError as e:
        if e.error_code == "NoSuchTagSet":
            current_tags = None
        else:
            module.fail_json(msg=e.message)

    if current_tags is None:
        current_tags_dict = {}
    else:
        current_tags_dict = dict((t.key, t.value) for t in current_tags[0])

    if tags is not None:
        if current_tags_dict != tags:
            try:
                if tags:
                    bucket.set_tags(create_tags_container(tags))
                else:
                    bucket.delete_tags()
                current_tags_dict = tags
                changed = True
            except S3ResponseError as e:
                module.fail_json(msg=e.message)

    module.exit_json(changed=changed, name=bucket.name, versioning=versioning_status, requester_pays=requester_pays_status, policy=current_policy, tags=current_tags_dict)
+
+
def _destroy_bucket(connection, module):
    """Delete the named bucket, optionally force-emptying it first.

    Exits the module via exit_json/fail_json; never returns normally.
    """
    force = module.params.get("force")
    name = module.params.get("name")
    changed = False

    try:
        bucket = connection.get_bucket(name)
    except S3ResponseError as e:
        if e.error_code != "NoSuchBucket":
            module.fail_json(msg=e.message)
        else:
            # Bucket already absent
            module.exit_json(changed=changed)

    if force:
        try:
            # Empty the bucket (S3 refuses to delete a non-empty bucket)
            for key in bucket.list():
                key.delete()

        except BotoServerError as e:
            module.fail_json(msg=e.message)

    try:
        bucket = connection.delete_bucket(name)
        changed = True
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
+
+
def _create_or_update_bucket_ceph(connection, module, location):
    """Ensure the bucket exists on a Ceph RGW endpoint.

    Exits the module via exit_json/fail_json; never returns normally.
    """
    # TODO: add update support (currently this only ensures existence;
    # policy/tags/versioning parameters are ignored for the Ceph flavour).
    name = module.params.get("name")

    changed = False

    try:
        bucket = connection.get_bucket(name)
    except S3ResponseError as e:
        try:
            bucket = connection.create_bucket(name, location=location)
            changed = True
        except S3CreateError as e:
            module.fail_json(msg=e.message)

    if bucket:
        module.exit_json(changed=changed)
    else:
        module.fail_json(msg='Unable to create bucket, no error from the API')
+
+
def _destroy_bucket_ceph(connection, module):
    """Delete a bucket on a Ceph RGW endpoint (same semantics as plain S3)."""
    _destroy_bucket(connection, module)
+
+
def create_or_update_bucket(connection, module, location, flavour='aws'):
    """Dispatch bucket creation/update to the backend-specific implementation."""
    handler = _create_or_update_bucket_ceph if flavour == 'ceph' else _create_or_update_bucket
    handler(connection, module, location)
+
+
def destroy_bucket(connection, module, flavour='aws'):
    """Dispatch bucket deletion to the backend-specific implementation."""
    handler = _destroy_bucket_ceph if flavour == 'ceph' else _destroy_bucket
    handler(connection, module)
+
+
def is_fakes3(s3_url):
    """ Return True if s3_url has scheme fakes3:// """
    if s3_url is None:
        return False
    return urlparse.urlparse(s3_url).scheme in ('fakes3', 'fakes3s')
+
+
def is_walrus(s3_url):
    """ Return True if it's Walrus endpoint, not S3

    We assume anything other than *.amazonaws.com is Walrus"""
    if s3_url is None:
        return False
    return not urlparse.urlparse(s3_url).hostname.endswith('amazonaws.com')
+
def main():
    """Entry point: create or delete an S3 bucket on AWS, Ceph, Walrus or fakes3."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            force=dict(required=False, default='no', type='bool'),
            policy=dict(required=False, default=None, type='json'),
            name=dict(required=True, type='str'),
            requester_pays=dict(default='no', type='bool'),
            s3_url=dict(aliases=['S3_URL'], type='str'),
            state=dict(default='present', type='str', choices=['present', 'absent']),
            tags=dict(required=False, default=None, type='dict'),
            versioning=dict(default=None, type='bool'),
            ceph=dict(default='no', type='bool')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region

    s3_url = module.params.get('s3_url')

    # allow eucarc environment variables to be used if ansible vars aren't set
    if not s3_url and 'S3_URL' in os.environ:
        s3_url = os.environ['S3_URL']

    ceph = module.params.get('ceph')

    if ceph and not s3_url:
        module.fail_json(msg='ceph flavour requires s3_url')

    flavour = 'aws'

    # Look at s3_url and tweak connection settings
    # if connecting to Walrus or fakes3
    try:
        if s3_url and ceph:
            ceph = urlparse.urlparse(s3_url)
            connection = boto.connect_s3(
                host=ceph.hostname,
                port=ceph.port,
                is_secure=ceph.scheme == 'https',
                calling_format=OrdinaryCallingFormat(),
                **aws_connect_params
            )
            flavour = 'ceph'
        elif is_fakes3(s3_url):
            # NOTE: S3Connection must be imported from boto.s3.connection
            # (see the import block at the top of the file); it previously
            # raised a NameError on this code path.
            fakes3 = urlparse.urlparse(s3_url)
            connection = S3Connection(
                is_secure=fakes3.scheme == 'fakes3s',
                host=fakes3.hostname,
                port=fakes3.port,
                calling_format=OrdinaryCallingFormat(),
                **aws_connect_params
            )
        elif is_walrus(s3_url):
            walrus = urlparse.urlparse(s3_url).hostname
            connection = boto.connect_walrus(walrus, **aws_connect_params)
        else:
            connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
            # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
            if connection is None:
                connection = boto.connect_s3(**aws_connect_params)

    except boto.exception.NoAuthHandlerFound as e:
        module.fail_json(msg='No Authentication Handler found: %s ' % str(e))
    except Exception as e:
        module.fail_json(msg='Failed to connect to S3: %s' % str(e))

    if connection is None:  # this should never happen
        module.fail_json(msg='Unknown error, failed to create s3 connection, no information from boto.')

    state = module.params.get("state")

    if state == 'present':
        # BUGFIX: propagate the detected flavour so the Ceph-specific code
        # path is taken on create/update; it was previously only honoured
        # for deletion.
        create_or_update_bucket(connection, module, location, flavour=flavour)
    elif state == 'absent':
        destroy_bucket(connection, module, flavour=flavour)
diff --git a/lib/ansible/modules/cloud/amazon/s3_lifecycle.py b/lib/ansible/modules/cloud/amazon/s3_lifecycle.py
new file mode 100644
index 0000000000..f981dfadb8
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/s3_lifecycle.py
@@ -0,0 +1,439 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: s3_lifecycle
+short_description: Manage s3 bucket lifecycle rules in AWS
+description:
+ - Manage s3 bucket lifecycle rules in AWS
+version_added: "2.0"
+author: "Rob White (@wimnat)"
+notes:
+ - If specifying expiration time as days then transition time must also be specified in days
+ - If specifying expiration time as a date then transition time must also be specified as a date
+requirements:
+ - python-dateutil
+options:
+ name:
+ description:
+ - "Name of the s3 bucket"
+ required: true
+ expiration_date:
+ description:
+ - "Indicates the lifetime of the objects that are subject to the rule by the date they will expire. The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified."
+ required: false
+ default: null
+ expiration_days:
+ description:
+ - "Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer."
+ required: false
+ default: null
+ prefix:
+ description:
+ - "Prefix identifying one or more objects to which the rule applies. If no prefix is specified, the rule will apply to the whole bucket."
+ required: false
+ default: null
+ rule_id:
+ description:
+ - "Unique identifier for the rule. The value cannot be longer than 255 characters. A unique value for the rule will be generated if no value is provided."
+ required: false
+ default: null
+ state:
+ description:
+ - "Create or remove the lifecycle rule"
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ status:
+ description:
+ - "If 'enabled', the rule is currently being applied. If 'disabled', the rule is not currently being applied."
+ required: false
+ default: enabled
+ choices: [ 'enabled', 'disabled' ]
+ storage_class:
+ description:
+ - "The storage class to transition to. Currently there are two supported values - 'glacier' or 'standard_ia'."
+      - "The 'standard_ia' class is only available from Ansible version 2.2 onwards."
+ required: false
+ default: glacier
+ choices: [ 'glacier', 'standard_ia']
+ transition_date:
+ description:
+ - "Indicates the lifetime of the objects that are subject to the rule by the date they will transition to a different storage class. The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. If transition_days is not specified, this parameter is required."
+ required: false
+ default: null
+ transition_days:
+ description:
+ - "Indicates when, in days, an object transitions to a different storage class. If transition_date is not specified, this parameter is required."
+ required: false
+ default: null
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
+- s3_lifecycle:
+ name: mybucket
+ expiration_days: 30
+ prefix: /logs/
+ status: enabled
+ state: present
+
+# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days
+- s3_lifecycle:
+ name: mybucket
+ transition_days: 7
+ expiration_days: 90
+ prefix: /logs/
+ status: enabled
+ state: present
+
+# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 30 Dec 2020 and then delete on 30 Dec 2030. Note that midnight GMT must be specified.
+# Be sure to quote your date strings
+- s3_lifecycle:
+ name: mybucket
+ transition_date: "2020-12-30T00:00:00.000Z"
+ expiration_date: "2030-12-30T00:00:00.000Z"
+ prefix: /logs/
+ status: enabled
+ state: present
+
+# Disable the rule created above
+- s3_lifecycle:
+ name: mybucket
+ prefix: /logs/
+ status: disabled
+ state: present
+
+# Delete the lifecycle rule created above
+- s3_lifecycle:
+ name: mybucket
+ prefix: /logs/
+ state: absent
+
+# Configure a lifecycle rule to transition all backup files older than 31 days in /backups/ to standard infrequent access class.
+- s3_lifecycle:
+ name: mybucket
+ prefix: /backups/
+ storage_class: standard_ia
+ transition_days: 31
+ state: present
+ status: enabled
+
+'''
+
+import xml.etree.ElementTree as ET
+import copy
+import datetime
+
+try:
+ import dateutil.parser
+ HAS_DATEUTIL = True
+except ImportError:
+ HAS_DATEUTIL = False
+
+try:
+ import boto
+ import boto.ec2
+ from boto.s3.connection import OrdinaryCallingFormat, Location
+ from boto.s3.lifecycle import Lifecycle, Rule, Expiration, Transition
+ from boto.exception import BotoServerError, S3CreateError, S3ResponseError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info
+
def create_lifecycle_rule(connection, module):
    """Create or update a lifecycle rule on the bucket named in module.params.

    Builds a boto ``Rule`` from the module parameters and merges it into the
    bucket's existing lifecycle configuration: an existing rule with the same
    id (or, failing that, the same prefix) is replaced; all other rules are
    carried over untouched.  Terminates the module via exit_json/fail_json.
    """

    name = module.params.get("name")
    expiration_date = module.params.get("expiration_date")
    expiration_days = module.params.get("expiration_days")
    prefix = module.params.get("prefix")
    rule_id = module.params.get("rule_id")
    status = module.params.get("status")
    storage_class = module.params.get("storage_class")
    transition_date = module.params.get("transition_date")
    transition_days = module.params.get("transition_days")
    changed = False

    try:
        bucket = connection.get_bucket(name)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    # Get the bucket's current lifecycle rules
    try:
        current_lifecycle_obj = bucket.get_lifecycle_config()
    except S3ResponseError as e:
        if e.error_code == "NoSuchLifecycleConfiguration":
            # No lifecycle configured yet: start from an empty configuration
            current_lifecycle_obj = Lifecycle()
        else:
            module.fail_json(msg=e.message)

    # Create expiration (days and date are mutually exclusive per the argument spec)
    if expiration_days is not None:
        expiration_obj = Expiration(days=expiration_days)
    elif expiration_date is not None:
        expiration_obj = Expiration(date=expiration_date)
    else:
        expiration_obj = None

    # Create transition
    if transition_days is not None:
        transition_obj = Transition(days=transition_days, storage_class=storage_class.upper())
    elif transition_date is not None:
        transition_obj = Transition(date=transition_date, storage_class=storage_class.upper())
    else:
        transition_obj = None

    # Create rule
    rule = Rule(rule_id, prefix, status.title(), expiration_obj, transition_obj)

    # Create lifecycle
    lifecycle_obj = Lifecycle()

    appended = False
    # If current_lifecycle_obj is not None then we have rules to compare, otherwise just add the rule
    if current_lifecycle_obj:
        # If rule ID exists, use that for comparison otherwise compare based on prefix
        for existing_rule in current_lifecycle_obj:
            if rule.id == existing_rule.id:
                if compare_rule(rule, existing_rule):
                    # Identical rule already present: keep it, nothing changed
                    lifecycle_obj.append(rule)
                    appended = True
                else:
                    # Same id but different settings: replace with the new rule
                    lifecycle_obj.append(rule)
                    changed = True
                    appended = True
            elif rule.prefix == existing_rule.prefix:
                # NOTE(review): clears the id on the fetched rule so compare_rule
                # only looks at the remaining fields -- this mutates existing_rule
                existing_rule.id = None
                if compare_rule(rule, existing_rule):
                    lifecycle_obj.append(rule)
                    appended = True
                else:
                    lifecycle_obj.append(rule)
                    changed = True
                    appended = True
            else:
                # Unrelated rule: carry it over untouched
                lifecycle_obj.append(existing_rule)
        # If nothing appended then append now as the rule must not exist
        if not appended:
            lifecycle_obj.append(rule)
            changed = True
    else:
        lifecycle_obj.append(rule)
        changed = True

    # Write lifecycle to bucket
    try:
        bucket.configure_lifecycle(lifecycle_obj)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
+
def compare_rule(rule_a, rule_b):
    """Return True if two boto lifecycle Rule objects are equivalent.

    Both rules are deep-copied and normalised before comparison: a stray
    ``Rule`` attribute is dropped, and missing expiration/transition parts are
    replaced with empty Expiration()/Transition() instances so that None and
    "empty" compare equal.  The inputs are never mutated.
    """
    def _normalise(rule):
        # Work on a copy so the caller's object is untouched.
        clone = copy.deepcopy(rule)
        if hasattr(clone, 'Rule'):
            del clone.Rule
        expiration = clone.expiration
        transition = clone.transition
        # Compare the remaining rule fields separately from the sub-objects.
        del clone.expiration
        del clone.transition
        if expiration is None:
            expiration = Expiration()
        if transition is None:
            transition = Transition()
        return clone, expiration, transition

    base_a, exp_a, trans_a = _normalise(rule_a)
    base_b, exp_b, trans_b = _normalise(rule_b)

    return (base_a.__dict__ == base_b.__dict__ and
            exp_a.__dict__ == exp_b.__dict__ and
            trans_a.__dict__ == trans_b.__dict__)
+
+
def destroy_lifecycle_rule(connection, module):
    """Remove a lifecycle rule (matched by rule_id, else by prefix) from the bucket.

    Rules that do not match are preserved; when no rules remain the whole
    lifecycle configuration is deleted.  Terminates the module via
    exit_json/fail_json.
    """
    bucket_name = module.params.get("name")
    prefix = module.params.get("prefix") or ""
    rule_id = module.params.get("rule_id")
    changed = False

    try:
        bucket = connection.get_bucket(bucket_name)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    # Fetch the bucket's current lifecycle rules
    try:
        existing_rules = bucket.get_lifecycle_config()
    except S3ResponseError as e:
        if e.error_code == "NoSuchLifecycleConfiguration":
            # Nothing configured, so there is nothing to remove
            module.exit_json(changed=changed)
        else:
            module.fail_json(msg=e.message)

    remaining = Lifecycle()
    for candidate in existing_rules:
        # Match on rule id when one was supplied, otherwise on prefix
        if rule_id is not None:
            matched = (rule_id == candidate.id)
        else:
            matched = (prefix == candidate.prefix)
        if matched:
            # Dropping this rule, so the bucket configuration changes
            changed = True
        else:
            remaining.append(candidate)

    # Write the surviving rules back or, if none remain, delete the whole config
    try:
        if remaining:
            bucket.configure_lifecycle(remaining)
        else:
            bucket.delete_lifecycle_configuration()
    except BotoServerError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point for s3_lifecycle.

    Validates parameters (including the ISO-8601 date format for
    expiration_date/transition_date), opens an S3 connection and dispatches to
    create_lifecycle_rule or destroy_lifecycle_rule depending on state.
    """

    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            expiration_days=dict(default=None, required=False, type='int'),
            expiration_date=dict(default=None, required=False, type='str'),
            prefix=dict(default=None, required=False),
            # NOTE(review): requester_pays is never read by this module and
            # looks like a copy/paste leftover from s3_bucket; retained so
            # existing playbooks that set it do not start failing.
            requester_pays=dict(default='no', type='bool'),
            rule_id=dict(required=False, type='str'),
            state=dict(default='present', choices=['present', 'absent']),
            status=dict(default='enabled', choices=['enabled', 'disabled']),
            storage_class=dict(default='glacier', type='str', choices=['glacier', 'standard_ia']),
            transition_days=dict(default=None, required=False, type='int'),
            transition_date=dict(default=None, required=False, type='str')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[
                               ['expiration_days', 'expiration_date'],
                               ['expiration_days', 'transition_date'],
                               ['transition_days', 'transition_date'],
                               ['transition_days', 'expiration_date']
                           ]
                           )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    if not HAS_DATEUTIL:
        module.fail_json(msg='dateutil required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region
    try:
        connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
        # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
        if connection is None:
            connection = boto.connect_s3(**aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    expiration_date = module.params.get("expiration_date")
    transition_date = module.params.get("transition_date")
    state = module.params.get("state")
    storage_class = module.params.get("storage_class")

    # Dates must be ISO-8601 at midnight GMT, e.g. 2020-12-30T00:00:00.000Z
    if expiration_date is not None:
        try:
            datetime.datetime.strptime(expiration_date, "%Y-%m-%dT%H:%M:%S.000Z")
        except ValueError:
            module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")

    if transition_date is not None:
        try:
            datetime.datetime.strptime(transition_date, "%Y-%m-%dT%H:%M:%S.000Z")
        except ValueError:
            # BUGFIX: this message previously referred to expiration_date
            module.fail_json(msg="transition_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")

    boto_required_version = (2, 40, 0)
    if storage_class == 'standard_ia' and tuple(map(int, (boto.__version__.split(".")))) < boto_required_version:
        module.fail_json(msg="'standard_ia' class requires boto >= 2.40.0")

    if state == 'present':
        create_lifecycle_rule(connection, module)
    elif state == 'absent':
        destroy_lifecycle_rule(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/s3_logging.py b/lib/ansible/modules/cloud/amazon/s3_logging.py
new file mode 100644
index 0000000000..038cacbef5
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/s3_logging.py
@@ -0,0 +1,184 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: s3_logging
+short_description: Manage logging facility of an s3 bucket in AWS
+description:
+ - Manage logging facility of an s3 bucket in AWS
+version_added: "2.0"
+author: Rob White (@wimnat)
+options:
+ name:
+ description:
+ - "Name of the s3 bucket."
+ required: true
+ state:
+ description:
+ - "Enable or disable logging."
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ target_bucket:
+ description:
+ - "The bucket to log to. Required when state=present."
+ required: false
+ default: null
+ target_prefix:
+ description:
+ - "The prefix that should be prepended to the generated log files written to the target_bucket."
+ required: false
+ default: ""
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: Enable logging of s3 bucket mywebsite.com to s3 bucket mylogs
+ s3_logging:
+ name: mywebsite.com
+ target_bucket: mylogs
+ target_prefix: logs/mywebsite.com
+ state: present
+
+- name: Remove logging on an s3 bucket
+ s3_logging:
+ name: mywebsite.com
+ state: absent
+
+'''
+
+try:
+ import boto.ec2
+ from boto.s3.connection import OrdinaryCallingFormat, Location
+ from boto.exception import BotoServerError, S3CreateError, S3ResponseError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import AnsibleAWSError, ec2_argument_spec, get_aws_connection_info
+
+
def compare_bucket_logging(bucket, target_bucket, target_prefix):
    """Return True when the bucket's current logging status already points at
    target_bucket/target_prefix, False otherwise."""
    status = bucket.get_logging_status()
    return status.target == target_bucket and status.prefix == target_prefix
+
+
def enable_bucket_logging(connection, module):
    """Enable server access logging on the named bucket, writing log objects
    to target_bucket under target_prefix.

    No-op (changed=False) when logging is already configured that way.
    Terminates the module via exit_json/fail_json.
    """
    source_name = module.params.get("name")
    target_bucket = module.params.get("target_bucket")
    target_prefix = module.params.get("target_prefix")
    changed = False

    try:
        source_bucket = connection.get_bucket(source_name)
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    try:
        if not compare_bucket_logging(source_bucket, target_bucket, target_prefix):
            # The log-delivery group needs WRITE and READ_ACP on the target
            # bucket before logging can be switched on.
            try:
                log_bucket = connection.get_bucket(target_bucket)
            except S3ResponseError as e:
                if e.status == 301:
                    module.fail_json(msg="the logging target bucket must be in the same region as the bucket being logged")
                else:
                    module.fail_json(msg=e.message)
            log_bucket.set_as_logging_target()

            source_bucket.enable_logging(target_bucket, target_prefix)
            changed = True

    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
+
+
def disable_bucket_logging(connection, module):
    """Turn off server access logging for the named bucket.

    No-op (changed=False) when logging is already disabled.  Terminates the
    module via exit_json/fail_json.
    """
    changed = False

    try:
        bucket = connection.get_bucket(module.params.get("name"))
        # Only issue the disable call when logging is actually on; a disabled
        # bucket compares equal to (target=None, prefix=None).
        if not compare_bucket_logging(bucket, None, None):
            bucket.disable_logging()
            changed = True
    except S3ResponseError as e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)
+
+
def main():
    """Module entry point for s3_logging.

    Builds the argument spec, opens an S3 connection for the resolved region
    and dispatches to enable_bucket_logging or disable_bucket_logging.
    """

    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True),
            target_bucket=dict(required=False, default=None),
            target_prefix=dict(required=False, default=""),
            state=dict(required=False, default='present', choices=['present', 'absent'])
        )
    )

    # DOCUMENTATION says target_bucket is required when state=present; enforce
    # that here instead of failing later with a confusing boto error.
    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=[('state', 'present', ['target_bucket'])])

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region
    try:
        connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
        # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
        if connection is None:
            connection = boto.connect_s3(**aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    state = module.params.get("state")

    if state == 'present':
        enable_bucket_logging(connection, module)
    elif state == 'absent':
        disable_bucket_logging(connection, module)
+
+
+if __name__ == '__main__':
+ main() \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/amazon/s3_website.py b/lib/ansible/modules/cloud/amazon/s3_website.py
new file mode 100644
index 0000000000..b8e1503b2d
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/s3_website.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: s3_website
+short_description: Configure an s3 bucket as a website
+description:
+ - Configure an s3 bucket as a website
+version_added: "2.2"
+author: Rob White (@wimnat)
+options:
+ name:
+ description:
+ - "Name of the s3 bucket"
+ required: true
+ default: null
+ error_key:
+ description:
+ - "The object key name to use when a 4XX class error occurs. To remove an error key, set to None."
+ required: false
+ default: null
+ redirect_all_requests:
+ description:
+ - "Describes the redirect behavior for every request to this s3 bucket website endpoint"
+ required: false
+ default: null
+ region:
+ description:
+ - "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard."
+ required: false
+ default: null
+ state:
+ description:
+ - "Add or remove s3 website configuration"
+ required: false
+ default: present
+ choices: [ 'present', 'absent' ]
+ suffix:
+ description:
+ - "Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash character."
+ required: false
+ default: index.html
+
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Configure an s3 bucket to redirect all requests to example.com
+- s3_website:
+ name: mybucket.com
+ redirect_all_requests: example.com
+ state: present
+
+# Remove website configuration from an s3 bucket
+- s3_website:
+ name: mybucket.com
+ state: absent
+
+# Configure an s3 bucket as a website with index and error pages
+- s3_website:
+ name: mybucket.com
+ suffix: home.htm
+ error_key: errors/404.htm
+ state: present
+
+'''
+
+RETURN = '''
+index_document:
+ suffix:
+ description: suffix that is appended to a request that is for a directory on the website endpoint
+ returned: success
+ type: string
+ sample: index.html
+error_document:
+ key:
+ description: object key name to use when a 4XX class error occurs
+ returned: when error_document parameter set
+ type: string
+ sample: error.html
+redirect_all_requests_to:
+ host_name:
+ description: name of the host where requests will be redirected.
+ returned: when redirect all requests parameter set
+ type: string
+ sample: ansible.com
+routing_rules:
+ routing_rule:
+ host_name:
+ description: name of the host where requests will be redirected.
+ returned: when host name set as part of redirect rule
+ type: string
+ sample: ansible.com
+ condition:
+ key_prefix_equals:
+ description: object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html
+ returned: when routing rule present
+ type: string
+ sample: docs/
+ redirect:
+ replace_key_prefix_with:
+ description: object key prefix to use in the redirect request
+ returned: when routing rule present
+ type: string
+ sample: documents/
+
+'''
+
+import time
+
+try:
+ from botocore.exceptions import ClientError, ParamValidationError, NoCredentialsError
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+def _create_redirect_dict(url):
+
+ redirect_dict = {}
+ url_split = url.split(':')
+
+ # Did we split anything?
+ if len(url_split) == 2:
+ redirect_dict[u'Protocol'] = url_split[0]
+ redirect_dict[u'HostName'] = url_split[1].replace('//', '')
+ elif len(url_split) == 1:
+ redirect_dict[u'HostName'] = url_split[0]
+ else:
+ raise ValueError('Redirect URL appears invalid')
+
+ return redirect_dict
+
+
+def _create_website_configuration(suffix, error_key, redirect_all_requests):
+
+ website_configuration = {}
+
+ if error_key is not None:
+ website_configuration['ErrorDocument'] = { 'Key': error_key }
+
+ if suffix is not None:
+ website_configuration['IndexDocument'] = { 'Suffix': suffix }
+
+ if redirect_all_requests is not None:
+ website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests)
+
+ return website_configuration
+
+
def enable_or_update_bucket_as_website(client_connection, resource_connection, module):
    """Create or update the website configuration on an S3 bucket.

    Reads name/suffix/error_key/redirect_all_requests from module.params,
    writes the configuration only when it differs from what is already on the
    bucket, then exits the module with changed + the (snake-cased) resulting
    website configuration.
    """
    bucket_name = module.params.get("name")
    redirect_all_requests = module.params.get("redirect_all_requests")
    # If redirect_all_requests is set then don't use the default suffix that has been set
    if redirect_all_requests is not None:
        suffix = None
    else:
        suffix = module.params.get("suffix")
    error_key = module.params.get("error_key")
    changed = False

    def _put_website_config(bucket_website):
        # Push the desired configuration, translating failures into module
        # errors.  This was previously duplicated three times inline.
        try:
            bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
        except ClientError as e:
            module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
        except ParamValidationError as e:
            # BUGFIX: ParamValidationError has no .response attribute, so the
            # old shared handler raised AttributeError instead of failing cleanly
            module.fail_json(msg=str(e))
        except ValueError as e:
            module.fail_json(msg=str(e))

    try:
        bucket_website = resource_connection.BucketWebsite(bucket_name)
    except ClientError as e:
        module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))

    try:
        website_config = client_connection.get_bucket_website(Bucket=bucket_name)
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration':
            website_config = None
        else:
            module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))

    if website_config is None:
        _put_website_config(bucket_website)
        changed = True
    else:
        try:
            if (suffix is not None and website_config['IndexDocument']['Suffix'] != suffix) or \
               (error_key is not None and website_config['ErrorDocument']['Key'] != error_key) or \
               (redirect_all_requests is not None and website_config['RedirectAllRequestsTo'] != _create_redirect_dict(redirect_all_requests)):
                _put_website_config(bucket_website)
                changed = True
        except KeyError:
            # Existing config lacks one of the compared keys, so it differs:
            # overwrite it with the desired configuration
            _put_website_config(bucket_website)
            changed = True
        except ValueError as e:
            module.fail_json(msg=str(e))

    # Wait 5 secs before getting the website_config again to give it time to update
    time.sleep(5)

    website_config = client_connection.get_bucket_website(Bucket=bucket_name)
    module.exit_json(changed=changed, **camel_dict_to_snake_dict(website_config))
+
+
def disable_bucket_as_website(client_connection, module):
    """Delete the website configuration from the bucket; no-op when none
    exists.  Terminates the module via exit_json/fail_json."""
    bucket_name = module.params.get("name")

    try:
        client_connection.get_bucket_website(Bucket=bucket_name)
    except ClientError as e:
        if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration':
            # Already not a website: nothing to do
            module.exit_json(changed=False)
        else:
            module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))

    try:
        client_connection.delete_bucket_website(Bucket=bucket_name)
    except ClientError as e:
        module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))

    module.exit_json(changed=True)
+
+
def main():
    """Module entry point for s3_website.

    Builds the argument spec, opens boto3 client and resource connections for
    the resolved region and dispatches on state.
    """

    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str', required=True),
            # DOCUMENTATION declares state optional with default 'present';
            # give the spec a matching default instead of requiring it
            # (backward-compatible: callers who pass state still work).
            state=dict(type='str', default='present', choices=['present', 'absent']),
            suffix=dict(type='str', required=False, default='index.html'),
            error_key=dict(type='str', required=False),
            redirect_all_requests=dict(type='str', required=False)
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['redirect_all_requests', 'suffix'],
            ['redirect_all_requests', 'error_key']
        ])

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)

    if region:
        client_connection = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params)
        resource_connection = boto3_conn(module, conn_type='resource', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params)
    else:
        module.fail_json(msg="region must be specified")

    state = module.params.get("state")

    if state == 'present':
        enable_or_update_bucket_as_website(client_connection, resource_connection, module)
    elif state == 'absent':
        disable_bucket_as_website(client_connection, module)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/sns_topic.py b/lib/ansible/modules/cloud/amazon/sns_topic.py
new file mode 100644
index 0000000000..e2b31484a1
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/sns_topic.py
@@ -0,0 +1,410 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# This is a free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This Ansible library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+module: sns_topic
+short_description: Manages AWS SNS topics and subscriptions
+description:
+ - The M(sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics.
+version_added: 2.0
+author:
+ - "Joel Thompson (@joelthompson)"
+ - "Fernando Jose Pando (@nand0p)"
+options:
+ name:
+ description:
+ - The name or ARN of the SNS topic to converge
+ required: True
+ state:
+ description:
+ - Whether to create or destroy an SNS topic
+ required: False
+ default: present
+ choices: ["absent", "present"]
+ display_name:
+ description:
+ - Display name of the topic
+ required: False
+ default: None
+ policy:
+ description:
+ - Policy to apply to the SNS topic
+ required: False
+ default: None
+ delivery_policy:
+ description:
+ - Delivery policy to apply to the SNS topic
+ required: False
+ default: None
+ subscriptions:
+ description:
+ - List of subscriptions to apply to the topic. Note that AWS requires
+ subscriptions to be confirmed, so you will need to confirm any new
+ subscriptions.
+ required: False
+ default: []
+ purge_subscriptions:
+ description:
+ - "Whether to purge any subscriptions not listed here. NOTE: AWS does not
+ allow you to purge any PendingConfirmation subscriptions, so if any
+ exist and would be purged, they are silently skipped. This means that
+ somebody could come back later and confirm the subscription. Sorry.
+ Blame Amazon."
+ required: False
+ default: True
+extends_documentation_fragment: aws
+requirements: [ "boto" ]
+"""
+
+EXAMPLES = """
+
+- name: Create alarm SNS topic
+ sns_topic:
+ name: "alarms"
+ state: present
+ display_name: "alarm SNS topic"
+ delivery_policy:
+ http:
+ defaultHealthyRetryPolicy:
+ minDelayTarget: 2
+ maxDelayTarget: 4
+ numRetries: 3
+ numMaxDelayRetries: 5
+ backoffFunction: "<linear|arithmetic|geometric|exponential>"
+ disableSubscriptionOverrides: True
+ defaultThrottlePolicy:
+ maxReceivesPerSecond: 10
+ subscriptions:
+ - endpoint: "my_email_address@example.com"
+ protocol: "email"
+ - endpoint: "my_mobile_number"
+ protocol: "sms"
+
+"""
+
+RETURN = '''
+sns_arn:
+ description: The ARN of the topic you are modifying
+ type: string
+ sample: "arn:aws:sns:us-east-1:123456789012:my_topic_name"
+
+sns_topic:
+ description: Dict of sns topic details
+ type: dict
+ sample:
+ name: sns-topic-name
+ state: present
+ display_name: default
+ policy: {}
+ delivery_policy: {}
+ subscriptions_new: []
+ subscriptions_existing: []
+ subscriptions_deleted: []
+ subscriptions_added: []
+    subscriptions_purge: false
+ check_mode: false
+ topic_created: false
+ topic_deleted: false
+ attributes_set: []
+'''
+
+import time
+import json
+import re
+
+try:
+ import boto.sns
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import connect_to_aws, ec2_argument_spec, get_aws_connection_info
+
+
class SnsTopicManager(object):
    """Handles SNS topic creation and destruction.

    Converges an SNS topic's existence, attributes (DisplayName, Policy,
    DeliveryPolicy) and subscriptions through boto's SNS API, recording
    everything it changed for reporting via get_info().
    """

    def __init__(self,
                 module,
                 name,
                 state,
                 display_name,
                 policy,
                 delivery_policy,
                 subscriptions,
                 purge_subscriptions,
                 check_mode,
                 region,
                 **aws_connect_params):

        # Bind the module first so _get_boto_connection() can call
        # fail_json() if connecting raises.
        self.module = module
        self.region = region
        self.aws_connect_params = aws_connect_params
        self.connection = self._get_boto_connection()
        self.changed = False
        self.name = name
        self.state = state
        self.display_name = display_name
        self.policy = policy
        self.delivery_policy = delivery_policy
        self.subscriptions = subscriptions
        self.subscriptions_existing = []
        self.subscriptions_deleted = []
        self.subscriptions_added = []
        self.purge_subscriptions = purge_subscriptions
        self.check_mode = check_mode
        self.topic_created = False
        self.topic_deleted = False
        self.arn_topic = None
        self.attributes_set = []

    def _get_boto_connection(self):
        """Connect to SNS in the configured region, failing the module on
        any boto server error."""
        try:
            return connect_to_aws(boto.sns, self.region,
                                  **self.aws_connect_params)
        except BotoServerError as err:
            self.module.fail_json(msg=err.message)

    def _get_all_topics(self):
        """Return the ARNs of every topic in the account/region, following
        NextToken pagination."""
        next_token = None
        topics = []
        while True:
            try:
                response = self.connection.get_all_topics(next_token)
            except BotoServerError as err:
                self.module.fail_json(msg=err.message)
            listing = response['ListTopicsResponse']['ListTopicsResult']
            topics.extend(listing['Topics'])
            # 'NextToken' may be absent or empty on the last page; either
            # way, stop paginating.
            next_token = listing.get('NextToken')
            if not next_token:
                break
        return [t['TopicArn'] for t in topics]

    def _arn_topic_lookup(self):
        """Return the ARN of this topic, or None if it does not exist yet."""
        # topic names cannot have colons, so this captures the full topic name
        all_topics = self._get_all_topics()
        lookup_topic = ':%s' % self.name
        for topic in all_topics:
            if topic.endswith(lookup_topic):
                return topic

    def _create_topic(self):
        """Create the topic, then poll until its ARN appears in listings
        (topic creation is eventually consistent)."""
        self.changed = True
        self.topic_created = True
        if not self.check_mode:
            self.connection.create_topic(self.name)
            self.arn_topic = self._arn_topic_lookup()
            # Only wait when we actually created something; in check mode
            # the topic never appears and polling would loop forever.
            while not self.arn_topic:
                time.sleep(3)
                self.arn_topic = self._arn_topic_lookup()

    def _set_topic_attrs(self):
        """Converge DisplayName, Policy and DeliveryPolicy on the topic."""
        topic_attributes = self.connection.get_topic_attributes(self.arn_topic) \
            ['GetTopicAttributesResponse']['GetTopicAttributesResult'] \
            ['Attributes']

        if self.display_name and self.display_name != topic_attributes['DisplayName']:
            self.changed = True
            self.attributes_set.append('display_name')
            if not self.check_mode:
                self.connection.set_topic_attributes(self.arn_topic, 'DisplayName',
                                                     self.display_name)

        # Policies are compared as parsed JSON so formatting differences do
        # not register as changes.
        if self.policy and self.policy != json.loads(topic_attributes['Policy']):
            self.changed = True
            self.attributes_set.append('policy')
            if not self.check_mode:
                self.connection.set_topic_attributes(self.arn_topic, 'Policy',
                                                     json.dumps(self.policy))

        if self.delivery_policy and ('DeliveryPolicy' not in topic_attributes or
                                     self.delivery_policy != json.loads(topic_attributes['DeliveryPolicy'])):
            self.changed = True
            self.attributes_set.append('delivery_policy')
            if not self.check_mode:
                self.connection.set_topic_attributes(self.arn_topic, 'DeliveryPolicy',
                                                     json.dumps(self.delivery_policy))

    def _canonicalize_endpoint(self, protocol, endpoint):
        """Normalize an endpoint for comparison with AWS's stored form."""
        # AWS reports SMS endpoints as bare digit strings, so strip all
        # non-digits from user-supplied numbers before comparing.
        if protocol == 'sms':
            return re.sub('[^0-9]*', '', endpoint)
        return endpoint

    def _get_topic_subs(self):
        """Populate subscriptions_existing with every subscription on the
        topic, following NextToken pagination."""
        next_token = None
        while True:
            response = self.connection.get_all_subscriptions_by_topic(self.arn_topic, next_token)
            listing = response['ListSubscriptionsByTopicResponse'] \
                ['ListSubscriptionsByTopicResult']
            self.subscriptions_existing.extend(listing['Subscriptions'])
            next_token = listing.get('NextToken')
            if not next_token:
                break

    def _set_topic_subs(self):
        """Converge the topic's subscriptions to the requested list,
        optionally purging subscriptions not requested."""
        subscriptions_existing_list = []
        desired_subscriptions = [(sub['protocol'],
                                  self._canonicalize_endpoint(sub['protocol'], sub['endpoint']))
                                 for sub in self.subscriptions]

        for sub in self.subscriptions_existing:
            sub_key = (sub['Protocol'], sub['Endpoint'])
            subscriptions_existing_list.append(sub_key)
            # PendingConfirmation subscriptions cannot be removed by the
            # topic owner, so they are skipped when purging.
            if (self.purge_subscriptions and
                    sub_key not in desired_subscriptions and
                    sub['SubscriptionArn'] != 'PendingConfirmation'):
                self.changed = True
                self.subscriptions_deleted.append(sub_key)
                if not self.check_mode:
                    self.connection.unsubscribe(sub['SubscriptionArn'])

        for (protocol, endpoint) in desired_subscriptions:
            if (protocol, endpoint) not in subscriptions_existing_list:
                self.changed = True
                # Record the (protocol, endpoint) pair being added; the
                # previous code appended a stale loop variable instead.
                self.subscriptions_added.append((protocol, endpoint))
                if not self.check_mode:
                    self.connection.subscribe(self.arn_topic, protocol, endpoint)

    def _delete_subscriptions(self):
        """Unsubscribe every confirmed subscription on the topic."""
        # NOTE: subscriptions in 'PendingConfirmation' timeout in 3 days
        # https://forums.aws.amazon.com/thread.jspa?threadID=85993
        for sub in self.subscriptions_existing:
            if sub['SubscriptionArn'] != 'PendingConfirmation':
                self.subscriptions_deleted.append(sub['SubscriptionArn'])
                self.changed = True
                if not self.check_mode:
                    self.connection.unsubscribe(sub['SubscriptionArn'])

    def _delete_topic(self):
        """Delete the topic itself."""
        self.topic_deleted = True
        self.changed = True
        if not self.check_mode:
            self.connection.delete_topic(self.arn_topic)

    def ensure_ok(self):
        """Make the topic exist with the requested attributes/subscriptions."""
        self.arn_topic = self._arn_topic_lookup()
        if not self.arn_topic:
            self._create_topic()
        # In check mode a missing topic is not actually created, so there
        # is no ARN to converge attributes or subscriptions on.
        if self.arn_topic:
            self._set_topic_attrs()
            self._get_topic_subs()
            self._set_topic_subs()

    def ensure_gone(self):
        """Remove the topic (and its confirmed subscriptions) if present."""
        self.arn_topic = self._arn_topic_lookup()
        if self.arn_topic:
            self._get_topic_subs()
            if self.subscriptions_existing:
                self._delete_subscriptions()
            self._delete_topic()

    def get_info(self):
        """Return a dict summarizing the requested state and everything
        this manager changed, for the module's return value."""
        info = {
            'name': self.name,
            'state': self.state,
            'display_name': self.display_name,
            'policy': self.policy,
            'delivery_policy': self.delivery_policy,
            'subscriptions_new': self.subscriptions,
            'subscriptions_existing': self.subscriptions_existing,
            'subscriptions_deleted': self.subscriptions_deleted,
            'subscriptions_added': self.subscriptions_added,
            'subscriptions_purge': self.purge_subscriptions,
            'check_mode': self.check_mode,
            'topic_created': self.topic_created,
            'topic_deleted': self.topic_deleted,
            'attributes_set': self.attributes_set
        }

        return info
+
+
+
def main():
    """Entry point: converge the SNS topic described by the module args."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(type='str', required=True),
            state=dict(type='str', default='present',
                       choices=['present', 'absent']),
            display_name=dict(type='str', required=False),
            policy=dict(type='dict', required=False),
            delivery_policy=dict(type='dict', required=False),
            subscriptions=dict(default=[], type='list', required=False),
            purge_subscriptions=dict(type='bool', default=True),
        )
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    state = module.params.get('state')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg="region must be specified")

    sns_topic = SnsTopicManager(module,
                                module.params.get('name'),
                                state,
                                module.params.get('display_name'),
                                module.params.get('policy'),
                                module.params.get('delivery_policy'),
                                module.params.get('subscriptions'),
                                module.params.get('purge_subscriptions'),
                                module.check_mode,
                                region,
                                **aws_connect_params)

    if state == 'present':
        sns_topic.ensure_ok()
    elif state == 'absent':
        sns_topic.ensure_gone()

    # Report what changed plus a full summary of the topic's state.
    module.exit_json(changed=sns_topic.changed,
                     sns_arn=sns_topic.arn_topic,
                     sns_topic=sns_topic.get_info())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/sqs_queue.py b/lib/ansible/modules/cloud/amazon/sqs_queue.py
new file mode 100644
index 0000000000..bad72f96bb
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/sqs_queue.py
@@ -0,0 +1,321 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: sqs_queue
+short_description: Creates or deletes AWS SQS queues.
+description:
+ - Create or delete AWS SQS queues.
+ - Update attributes on existing queues.
+version_added: "2.0"
+author:
+ - Alan Loi (@loia)
+ - Fernando Jose Pando (@nand0p)
+ - Nadir Lloret (@nadirollo)
+requirements:
+ - "boto >= 2.33.0"
+options:
+ state:
+ description:
+ - Create or delete the queue
+ required: false
+ choices: ['present', 'absent']
+ default: 'present'
+ name:
+ description:
+ - Name of the queue.
+ required: true
+ default_visibility_timeout:
+ description:
+ - The default visibility timeout in seconds.
+ required: false
+ default: null
+ message_retention_period:
+ description:
+ - The message retention period in seconds.
+ required: false
+ default: null
+ maximum_message_size:
+ description:
+ - The maximum message size in bytes.
+ required: false
+ default: null
+ delivery_delay:
+ description:
+ - The delivery delay in seconds.
+ required: false
+ default: null
+ receive_message_wait_time:
+ description:
+ - The receive message wait time in seconds.
+ required: false
+ default: null
+ policy:
+ description:
+ - The json dict policy to attach to queue
+ required: false
+ default: null
+ version_added: "2.1"
+ redrive_policy:
+ description:
+ - json dict with the redrive_policy (see example)
+ required: false
+ default: null
+ version_added: "2.2"
+extends_documentation_fragment:
+ - aws
+ - ec2
+"""
+
+RETURN = '''
+default_visibility_timeout:
+ description: The default visibility timeout in seconds.
+ returned: always
+ sample: 30
+delivery_delay:
+ description: The delivery delay in seconds.
+ returned: always
+ sample: 0
+maximum_message_size:
+ description: The maximum message size in bytes.
+ returned: always
+ sample: 262144
+message_retention_period:
+ description: The message retention period in seconds.
+ returned: always
+ sample: 345600
+name:
+ description: Name of the SQS Queue
+ returned: always
+ sample: "queuename-987d2de0"
+queue_arn:
+ description: The queue's Amazon resource name (ARN).
+ returned: on successful creation or update of the queue
+ sample: 'arn:aws:sqs:us-east-1:199999999999:queuename-987d2de0'
+receive_message_wait_time:
+ description: The receive message wait time in seconds.
+ returned: always
+ sample: 0
+region:
+ description: Region that the queue was created within
+ returned: always
+ sample: 'us-east-1'
+'''
+
+EXAMPLES = '''
+# Create SQS queue with redrive policy
+- sqs_queue:
+ name: my-queue
+ region: ap-southeast-2
+ default_visibility_timeout: 120
+ message_retention_period: 86400
+ maximum_message_size: 1024
+ delivery_delay: 30
+ receive_message_wait_time: 20
+ policy: "{{ json_dict }}"
+ redrive_policy:
+ maxReceiveCount: 5
+ deadLetterTargetArn: arn:aws:sqs:eu-west-1:123456789012:my-dead-queue
+
+# Delete SQS queue
+- sqs_queue:
+ name: my-queue
+ region: ap-southeast-2
+ state: absent
+'''
+
+import json
+import traceback
+
+try:
+ import boto.sqs
+ from boto.exception import BotoServerError, NoAuthHandlerFound
+ HAS_BOTO = True
+
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
+
+
def create_or_update_sqs_queue(connection, module):
    """Ensure the named queue exists with the requested attributes, then
    exit the module reporting the queue's resulting state."""
    queue_name = module.params.get('name')

    queue_attributes = dict(
        default_visibility_timeout=module.params.get('default_visibility_timeout'),
        message_retention_period=module.params.get('message_retention_period'),
        maximum_message_size=module.params.get('maximum_message_size'),
        delivery_delay=module.params.get('delivery_delay'),
        receive_message_wait_time=module.params.get('receive_message_wait_time'),
        policy=module.params.get('policy'),
        redrive_policy=module.params.get('redrive_policy')
    )

    result = dict(region=module.params.get('region'), name=queue_name)
    result.update(queue_attributes)

    try:
        queue = connection.get_queue(queue_name)
        if queue:
            # Queue already exists: converge its attributes.
            result['changed'] = update_sqs_queue(queue, check_mode=module.check_mode,
                                                 **queue_attributes)
        else:
            # Queue is missing: create and configure it (skipped in check mode).
            if not module.check_mode:
                queue = connection.create_queue(queue_name)
                update_sqs_queue(queue, **queue_attributes)
            result['changed'] = True

        if not module.check_mode:
            # Read the effective values back from AWS for the return data.
            readback = (
                ('queue_arn', 'QueueArn'),
                ('default_visibility_timeout', 'VisibilityTimeout'),
                ('message_retention_period', 'MessageRetentionPeriod'),
                ('maximum_message_size', 'MaximumMessageSize'),
                ('delivery_delay', 'DelaySeconds'),
                ('receive_message_wait_time', 'ReceiveMessageWaitTimeSeconds'),
            )
            for result_key, attribute in readback:
                result[result_key] = queue.get_attributes(attribute)[attribute]

    except BotoServerError:
        result['msg'] = 'Failed to create/update sqs queue due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
+
+
def update_sqs_queue(queue,
                     check_mode=False,
                     default_visibility_timeout=None,
                     message_retention_period=None,
                     maximum_message_size=None,
                     delivery_delay=None,
                     receive_message_wait_time=None,
                     policy=None,
                     redrive_policy=None):
    """Apply every provided attribute to *queue* via set_queue_attribute.

    Returns True if any attribute differed (and, unless check_mode, was
    updated); attributes left at None are skipped.
    """
    attribute_map = (
        ('VisibilityTimeout', default_visibility_timeout),
        ('MessageRetentionPeriod', message_retention_period),
        ('MaximumMessageSize', maximum_message_size),
        ('DelaySeconds', delivery_delay),
        ('ReceiveMessageWaitTimeSeconds', receive_message_wait_time),
        ('Policy', policy),
        ('RedrivePolicy', redrive_policy),
    )

    changed = False
    for attribute, value in attribute_map:
        changed = set_queue_attribute(queue, attribute, value,
                                      check_mode=check_mode) or changed
    return changed
+
+
def set_queue_attribute(queue, attribute, value, check_mode=False):
    """Set one attribute on *queue* if it differs from the current value.

    :param queue: boto SQS Queue (needs get_attributes/set_attribute)
    :param attribute: AWS attribute name, e.g. 'VisibilityTimeout'
    :param value: desired value; None means "not managed, leave as-is"
    :param check_mode: when True, report the change without applying it
    :returns: True when the attribute differed from the desired value
    """
    # Only None means "unmanaged" -- falsy values such as 0 (a valid
    # DelaySeconds) must still be applied.
    if value is None:
        return False

    try:
        existing_value = queue.get_attributes(attributes=attribute)[attribute]
    except Exception:
        # Attribute not present yet (or not readable): treat as unset so
        # the comparison below triggers a set rather than failing.
        existing_value = ''

    # convert dict attributes to JSON strings (sort keys for comparing)
    if attribute in ['Policy', 'RedrivePolicy']:
        value = json.dumps(value, sort_keys=True)
        if existing_value:
            existing_value = json.dumps(json.loads(existing_value), sort_keys=True)

    if str(value) != existing_value:
        if not check_mode:
            queue.set_attribute(attribute, value)
        return True

    return False
+
+
def delete_sqs_queue(connection, module):
    """Delete the named SQS queue if it exists; exit the module with the
    changed status (and region/name for the return data)."""
    queue_name = module.params.get('name')
    result = dict(region=module.params.get('region'), name=queue_name)

    try:
        queue = connection.get_queue(queue_name)
        if not queue:
            # Nothing to do: the queue is already gone.
            result['changed'] = False
        else:
            if not module.check_mode:
                connection.delete_queue(queue)
            result['changed'] = True

    except BotoServerError:
        result['msg'] = 'Failed to delete sqs queue due to error: ' + traceback.format_exc()
        module.fail_json(**result)
    else:
        module.exit_json(**result)
+
+
def main():
    """Entry point: build the argument spec, connect to SQS, and dispatch
    to the create/update or delete handler based on state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        name=dict(required=True, type='str'),
        default_visibility_timeout=dict(type='int'),
        message_retention_period=dict(type='int'),
        maximum_message_size=dict(type='int'),
        delivery_delay=dict(type='int'),
        receive_message_wait_time=dict(type='int'),
        policy=dict(type='dict', required=False),
        redrive_policy=dict(type='dict', required=False),
    ))

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg='region must be specified')

    try:
        connection = connect_to_aws(boto.sqs, region, **aws_connect_params)
    except (NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))

    # 'state' is constrained by choices above, so the lookup cannot miss.
    handlers = {
        'present': create_or_update_sqs_queue,
        'absent': delete_sqs_queue,
    }
    handlers[module.params.get('state')](connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/sts_assume_role.py b/lib/ansible/modules/cloud/amazon/sts_assume_role.py
new file mode 100644
index 0000000000..d856947a7d
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/sts_assume_role.py
@@ -0,0 +1,157 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: sts_assume_role
+short_description: Assume a role using AWS Security Token Service and obtain temporary credentials
+description:
+ - Assume a role using AWS Security Token Service and obtain temporary credentials
+version_added: "2.0"
+author: Boris Ekelchik (@bekelchik)
+options:
+ role_arn:
+ description:
+ - The Amazon Resource Name (ARN) of the role that the caller is assuming (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs)
+ required: true
+ role_session_name:
+ description:
+ - Name of the role's session - will be used by CloudTrail
+ required: true
+ policy:
+ description:
+ - Supplemental policy to use in addition to assumed role's policies.
+ required: false
+ default: null
+ duration_seconds:
+ description:
+ - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds.
+ required: false
+ default: null
+ external_id:
+ description:
+ - A unique identifier that is used by third parties to assume a role in their customers' accounts.
+ required: false
+ default: null
+ mfa_serial_number:
+ description:
+      - The identification number of the MFA device that is associated with the user who is making the AssumeRole call.
+ required: false
+ default: null
+ mfa_token:
+ description:
+ - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA.
+ required: false
+ default: null
+notes:
+ - In order to use the assumed role in a following playbook task you must pass the access_key, access_secret and access_token
+extends_documentation_fragment:
+ - aws
+ - ec2
+'''
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Assume an existing role (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)
+sts_assume_role:
+ role_arn: "arn:aws:iam::123456789012:role/someRole"
+ role_session_name: "someRoleSession"
+register: assumed_role
+
+# Use the assumed role above to tag an instance in account 123456789012
+ec2_tag:
+ aws_access_key: "{{ assumed_role.sts_creds.access_key }}"
+ aws_secret_key: "{{ assumed_role.sts_creds.secret_key }}"
+ security_token: "{{ assumed_role.sts_creds.session_token }}"
+ resource: i-xyzxyz01
+ state: present
+ tags:
+ MyNewTag: value
+
+'''
+
+try:
+ import boto.sts
+ from boto.exception import BotoServerError
+ HAS_BOTO = True
+except ImportError:
+ HAS_BOTO = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import AnsibleAWSError, connect_to_aws, ec2_argument_spec, get_aws_connection_info
+
+
def assume_role_policy(connection, module):
    """Call STS AssumeRole with the module parameters and exit the module
    with the temporary credentials and assumed-role user.

    Fails the module with the boto error text on BotoServerError;
    otherwise exits with changed=True, sts_creds and sts_user.
    """
    role_arn = module.params.get('role_arn')
    role_session_name = module.params.get('role_session_name')
    policy = module.params.get('policy')
    duration_seconds = module.params.get('duration_seconds')
    external_id = module.params.get('external_id')
    mfa_serial_number = module.params.get('mfa_serial_number')
    mfa_token = module.params.get('mfa_token')
    changed = False

    try:
        assumed_role = connection.assume_role(role_arn, role_session_name, policy,
                                              duration_seconds, external_id,
                                              mfa_serial_number, mfa_token)
        changed = True
    except BotoServerError as e:
        # fail_json payloads must be JSON-serializable: pass the error
        # text, not the exception object itself.
        module.fail_json(msg=str(e))

    module.exit_json(changed=changed,
                     sts_creds=assumed_role.credentials.__dict__,
                     sts_user=assumed_role.user.__dict__)
+
def main():
    """Entry point: parse arguments, connect to STS and assume the role."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            # required=True parameters must not also declare a default
            role_arn=dict(required=True),
            role_session_name=dict(required=True),
            duration_seconds=dict(required=False, default=None, type='int'),
            external_id=dict(required=False, default=None),
            policy=dict(required=False, default=None),
            mfa_serial_number=dict(required=False, default=None),
            mfa_token=dict(required=False, default=None)
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            connection = connect_to_aws(boto.sts, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    try:
        assume_role_policy(connection, module)
    except BotoServerError as e:
        # fail_json payloads must be JSON-serializable: pass the error
        # text, not the exception object itself.
        module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/amazon/sts_session_token.py b/lib/ansible/modules/cloud/amazon/sts_session_token.py
new file mode 100644
index 0000000000..4886b625fd
--- /dev/null
+++ b/lib/ansible/modules/cloud/amazon/sts_session_token.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: sts_session_token
+short_description: Obtain a session token from the AWS Security Token Service
+description:
+ - Obtain a session token from the AWS Security Token Service
+version_added: "2.2"
+author: Victor Costan (@pwnall)
+options:
+ duration_seconds:
+ description:
+ - The duration, in seconds, of the session token. See http://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html#API_GetSessionToken_RequestParameters for acceptable and default values.
+ required: false
+ default: null
+ mfa_serial_number:
+ description:
+ - The identification number of the MFA device that is associated with the user who is making the GetSessionToken call.
+ required: false
+ default: null
+ mfa_token:
+ description:
+ - The value provided by the MFA device, if the trust policy of the user requires MFA.
+ required: false
+ default: null
+notes:
+ - In order to use the session token in a following playbook task you must pass the I(access_key), I(access_secret) and I(access_token).
+extends_documentation_fragment:
+ - aws
+ - ec2
+requirements:
+ - boto3
+ - botocore
+ - python >= 2.6
+'''
+
+RETURN = """
+sts_creds:
+ description: The Credentials object returned by the AWS Security Token Service
+ returned: always
+ type: list
+ sample:
+ access_key: ASXXXXXXXXXXXXXXXXXX
+ expiration: "2016-04-08T11:59:47+00:00"
+ secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+changed:
+ description: True if obtaining the credentials succeeds
+ type: bool
+ returned: always
+"""
+
+
+EXAMPLES = '''
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Get a session token (more details: http://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html)
+sts_session_token:
+ duration: 3600
+register: session_credentials
+
+# Use the session token obtained above to tag an instance in account 123456789012
+ec2_tag:
+ aws_access_key: "{{ session_credentials.sts_creds.access_key }}"
+ aws_secret_key: "{{ session_credentials.sts_creds.secret_key }}"
+ security_token: "{{ session_credentials.sts_creds.session_token }}"
+ resource: i-xyzxyz01
+ state: present
+ tags:
+ MyNewTag: value
+
+'''
+
+try:
+ import boto3
+ from botocore.exceptions import ClientError
+ HAS_BOTO3 = True
+except ImportError:
+ HAS_BOTO3 = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ec2 import boto3_conn, ec2_argument_spec, get_aws_connection_info
+
+
def normalize_credentials(credentials):
    """Map an STS Credentials dict (CamelCase keys) onto the snake_case
    keys this module returns; missing keys become None."""
    key_map = (
        ('AccessKeyId', 'access_key'),
        ('SecretAccessKey', 'secret_key'),
        ('SessionToken', 'session_token'),
        ('Expiration', 'expiration'),
    )
    return dict((ours, credentials.get(theirs, None)) for theirs, ours in key_map)
+
def get_session_token(connection, module):
    """Request temporary credentials from STS and exit the module with them.

    Builds the GetSessionToken keyword arguments from the module
    parameters, omitting any that were not supplied so the service
    defaults apply, then exits with the normalized credentials.
    """
    duration_seconds = module.params.get('duration_seconds')
    mfa_serial_number = module.params.get('mfa_serial_number')
    mfa_token = module.params.get('mfa_token')
    changed = False

    args = {}
    if duration_seconds is not None:
        args['DurationSeconds'] = duration_seconds
    if mfa_serial_number is not None:
        args['SerialNumber'] = mfa_serial_number
    if mfa_token is not None:
        args['TokenCode'] = mfa_token

    try:
        response = connection.get_session_token(**args)
        changed = True
    except ClientError as e:
        # fail_json payloads must be JSON-serializable: pass the error
        # text, not the exception object itself.
        module.fail_json(msg=str(e))

    credentials = normalize_credentials(response.get('Credentials', {}))
    module.exit_json(changed=changed, sts_creds=credentials)
+
def main():
    """Entry point: parse arguments, connect to STS with boto3 and fetch
    a session token."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            duration_seconds=dict(required=False, default=None, type='int'),
            mfa_serial_number=dict(required=False, default=None),
            mfa_token=dict(required=False, default=None)
        )
    )

    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 and botocore are required.')

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    if not region:
        module.fail_json(msg="region must be specified")

    connection = boto3_conn(module, conn_type='client', resource='sts',
                            region=region, endpoint=ec2_url, **aws_connect_kwargs)

    get_session_token(connection, module)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/atomic/__init__.py b/lib/ansible/modules/cloud/atomic/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/cloud/atomic/__init__.py
diff --git a/lib/ansible/modules/cloud/atomic/atomic_host.py b/lib/ansible/modules/cloud/atomic/atomic_host.py
new file mode 100644
index 0000000000..ae4cb06e28
--- /dev/null
+++ b/lib/ansible/modules/cloud/atomic/atomic_host.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION='''
+---
+module: atomic_host
+short_description: Manage the atomic host platform
+description:
+ - Manage the atomic host platform
+ - Rebooting of Atomic host platform should be done outside this module
+version_added: "2.2"
+author: "Saravanan KR @krsacme"
+notes:
+ - Host should be an atomic platform (verified by existence of '/run/ostree-booted' file)
+requirements:
+ - atomic
+ - "python >= 2.6"
+options:
+ revision:
+ description:
+ - The version number of the atomic host to be deployed. Providing C(latest) will upgrade to the latest available version.
+ required: false
+ default: latest
+ aliases: ["version"]
+'''
+
+EXAMPLES = '''
+
+# Upgrade the atomic host platform to the latest version (atomic host upgrade)
+- atomic_host:
+ revision: latest
+
+# Deploy a specific revision as the atomic host (atomic host deploy 23.130)
+- atomic_host:
+ revision: 23.130
+'''
+
+RETURN = '''
+msg:
+ description: The command standard output
+ returned: always
+ type: string
+ sample: 'Already on latest'
+'''
+
def core(module):
    """Run 'atomic host upgrade' (revision=latest) or 'atomic host deploy <rev>'.

    Exits the module directly: unchanged when already on the latest tree,
    failed on any other non-zero return code, changed on success.
    """
    revision = module.params['revision']

    # Force a C locale so the command output we report is not localized.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')

    if revision == 'latest':
        args = ['atomic', 'host', 'upgrade']
    else:
        args = ['atomic', 'host', 'deploy', revision]

    # (Removed dead pre-initialization of rc/out/err that was immediately
    # overwritten by run_command's return value.)
    rc, out, err = module.run_command(args, check_rc=False)

    # 'atomic host upgrade' exits with 77 when there is no newer tree to deploy.
    if rc == 77 and revision == 'latest':
        module.exit_json(msg="Already on latest", changed=False)
    elif rc != 0:
        module.fail_json(rc=rc, msg=err)
    else:
        module.exit_json(msg=out, changed=True)
+
+
def main():
    """Module entry point: validate the platform, then delegate to core()."""
    module = AnsibleModule(
        argument_spec=dict(
            revision=dict(default='latest', required=False, aliases=["version"]),
        ),
    )

    # Atomic Host platforms are identified by the presence of this marker file.
    if not os.path.exists("/run/ostree-booted"):
        module.fail_json(msg="Module atomic_host is applicable for Atomic Host Platforms only")

    try:
        core(module)
    except Exception as e:
        module.fail_json(msg=str(e))
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/atomic/atomic_image.py b/lib/ansible/modules/cloud/atomic/atomic_image.py
new file mode 100644
index 0000000000..8210a1d3b8
--- /dev/null
+++ b/lib/ansible/modules/cloud/atomic/atomic_image.py
@@ -0,0 +1,143 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION='''
+---
+module: atomic_image
+short_description: Manage the container images on the atomic host platform
+description:
+ - Manage the container images on the atomic host platform
+ - Allows to execute the commands on the container images
+version_added: "2.2"
+author: "Saravanan KR @krsacme"
+notes:
+    - Host should support the C(atomic) command
+requirements:
+ - atomic
+ - "python >= 2.6"
+options:
+ name:
+ description:
+ - Name of the container image
+ required: True
+ default: null
+ state:
+ description:
+ - The state of the container image.
+ - The state C(latest) will ensure container image is upgraded to the latest version and forcefully restart container, if running.
+ required: False
+ choices: ["present", "absent", "latest"]
+ default: latest
+ started:
+ description:
+            - Start or Stop the container
+ required: False
+ choices: ["yes", "no"]
+ default: yes
+'''
+
+EXAMPLES = '''
+
+# Execute the run command on rsyslog container image (atomic run rhel7/rsyslog)
+- atomic_image:
+ name: rhel7/rsyslog
+ state: latest
+
+'''
+
+RETURN = '''
+msg:
+ description: The command standard output
+ returned: always
+ type: string
+ sample: [u'Using default tag: latest ...']
+'''
+
def do_upgrade(module, image):
    """Force-update the given container image.

    Returns True when the image was actually updated, False when the CLI
    reported it was already current. Fails the module on a non-zero exit.
    """
    update_cmd = ['atomic', 'update', '--force', image]
    rc, out, err = module.run_command(update_cmd, check_rc=False)
    if rc != 0:
        # Surface the stderr from the failed 'atomic update' call.
        module.fail_json(rc=rc, msg=err)
    elif 'Image is up to date' in out:
        return False

    return True
+
+
def core(module):
    """Install, run, or uninstall a container image via the atomic CLI.

    state=present/latest installs (or runs, when started=True) the image;
    state=latest additionally force-updates it first; state=absent
    uninstalls it. Exits the module directly via exit_json/fail_json.
    """
    image = module.params['name']
    state = module.params['state']
    started = module.params['started']
    is_upgraded = False

    # Force a C locale so the output strings matched below are not localized.
    module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')

    if state == 'present' or state == 'latest':
        if state == 'latest':
            is_upgraded = do_upgrade(module, image)

        if started:
            args = ['atomic', 'run', image]
        else:
            args = ['atomic', 'install', image]
    elif state == 'absent':
        args = ['atomic', 'uninstall', image]

    rc, out, err = module.run_command(args, check_rc=False)

    # NOTE(review): only a negative rc (process killed by a signal) is treated
    # as a hard failure here; other non-zero codes fall through to the
    # branches below — confirm this matches the atomic CLI's exit codes.
    if rc < 0:
        module.fail_json(rc=rc, msg=err)
    elif rc == 1 and 'already present' in err:
        # Bug fix: the result key was previously misspelled 'restult'.
        module.exit_json(result=err, changed=is_upgraded)
    elif started and 'Container is running' in out:
        module.exit_json(result=out, changed=is_upgraded)
    else:
        module.exit_json(msg=out, changed=True)
+
+
def main():
    """Module entry point: check the atomic CLI exists, then delegate to core()."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(default=None, required=True),
            state=dict(default='latest', choices=['present', 'absent', 'latest']),
            started=dict(default='yes', type='bool'),
        ),
    )

    # Ensure the platform provides the atomic command before doing anything.
    rc, out, err = module.run_command('atomic -v', check_rc=False)
    if rc != 0:
        module.fail_json(msg="Error in running atomic command", err=err)

    try:
        core(module)
    except Exception as e:
        module.fail_json(msg=str(e))
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_deployment.py b/lib/ansible/modules/cloud/azure/azure_rm_deployment.py
new file mode 100644
index 0000000000..88ecf0cea0
--- /dev/null
+++ b/lib/ansible/modules/cloud/azure/azure_rm_deployment.py
@@ -0,0 +1,665 @@
+#!/usr/bin/python
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'committer',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: azure_rm_deployment
+
+short_description: Create or destroy Azure Resource Manager template deployments
+
+version_added: "2.1"
+
+description:
+ - "Create or destroy Azure Resource Manager template deployments via the Azure SDK for Python.
+ You can find some quick start templates in GitHub here https://github.com/azure/azure-quickstart-templates.
+       For more information on Azure resource manager templates see https://azure.microsoft.com/en-us/documentation/articles/resource-group-template-deploy/."
+
+options:
+ resource_group_name:
+ description:
+ - The resource group name to use or create to host the deployed template
+ required: true
+ location:
+ description:
+ - The geo-locations in which the resource group will be located.
+ required: false
+ default: westus
+ deployment_mode:
+ description:
+ - In incremental mode, resources are deployed without deleting existing resources that are not included in the template.
+ In complete mode resources are deployed and existing resources in the resource group not included in the template are deleted.
+ required: false
+ default: incremental
+ choices:
+ - complete
+ - incremental
+ state:
+ description:
+ - If state is "present", template will be created. If state is "present" and if deployment exists, it will be
+ updated. If state is "absent", stack will be removed.
+ default: present
+ required: false
+ choices:
+ - present
+ - absent
+ template:
+ description:
+ - A hash containing the templates inline. This parameter is mutually exclusive with 'template_link'.
+ Either one of them is required if "state" parameter is "present".
+ required: false
+ default: null
+ template_link:
+ description:
+ - Uri of file containing the template body. This parameter is mutually exclusive with 'template'. Either one
+ of them is required if "state" parameter is "present".
+ required: false
+ default: null
+ parameters:
+ description:
+ - A hash of all the required template variables for the deployment template. This parameter is mutually exclusive
+ with 'parameters_link'. Either one of them is required if "state" parameter is "present".
+ required: false
+ default: null
+ parameters_link:
+ description:
+ - Uri of file containing the parameters body. This parameter is mutually exclusive with 'parameters'. Either
+ one of them is required if "state" parameter is "present".
+ required: false
+ default: null
+ deployment_name:
+ description:
+ - The name of the deployment to be tracked in the resource group deployment history. Re-using a deployment name
+ will overwrite the previous value in the resource group's deployment history.
+ default: ansible-arm
+ wait_for_deployment_completion:
+ description:
+ - Whether or not to block until the deployment has completed.
+ default: yes
+ choices: ['yes', 'no']
+ wait_for_deployment_polling_period:
+ description:
+ - Time (in seconds) to wait between polls when waiting for deployment completion.
+ default: 10
+
+extends_documentation_fragment:
+ - azure
+
+author:
+ - David Justice (@devigned)
+ - Laurent Mazuel (@lmazuel)
+ - Andre Price (@obsoleted)
+
+'''
+
+EXAMPLES = '''
+# Destroy a template deployment
+- name: Destroy Azure Deploy
+ azure_rm_deployment:
+ state: absent
+ subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ resource_group_name: dev-ops-cle
+
+# Create or update a template deployment based on uris using parameter and template links
+- name: Create Azure Deploy
+ azure_rm_deployment:
+ state: present
+ resource_group_name: dev-ops-cle
+ template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.json'
+ parameters_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-simple-linux/azuredeploy.parameters.json'
+
+# Create or update a template deployment based on a uri to the template and parameters specified inline.
+# This deploys a VM with SSH support for a given public key, then stores the result in 'azure_vms'. The result is then
+# used to create a new host group. This host group is then used to wait for each instance to respond to the public IP SSH.
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: Destroy Azure Deploy
+ azure_rm_deployment:
+ state: absent
+ subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ resource_group_name: dev-ops-cle
+
+ - name: Create Azure Deploy
+ azure_rm_deployment:
+ state: present
+ subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ resource_group_name: dev-ops-cle
+ parameters:
+ newStorageAccountName:
+ value: devopsclestorage1
+ adminUsername:
+ value: devopscle
+ dnsNameForPublicIP:
+ value: devopscleazure
+ location:
+ value: West US
+ vmSize:
+ value: Standard_A2
+ vmName:
+ value: ansibleSshVm
+ sshKeyData:
+ value: YOUR_SSH_PUBLIC_KEY
+ template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/101-vm-sshkey/azuredeploy.json'
+ register: azure
+
+ - name: Add new instance to host group
+ add_host:
+ hostname: '{{ item['ips'][0].public_ip }}'
+ groupname: azure_vms
+ with_items: "{{ azure.deployment.instances }}"
+
+ - hosts: azure_vms
+ user: devopscle
+ tasks:
+ - name: Wait for SSH to come up
+ wait_for:
+ port: 22
+ timeout: 2000
+ state: started
+ - name: echo the hostname of the vm
+ shell: hostname
+
+# Deploy an Azure WebApp running a hello world'ish node app
+- name: Create Azure WebApp Deployment at http://devopscleweb.azurewebsites.net/hello.js
+ azure_rm_deployment:
+ state: present
+ subscription_id: cbbdaed0-fea9-4693-bf0c-d446ac93c030
+ resource_group_name: dev-ops-cle-webapp
+ parameters:
+ repoURL:
+ value: 'https://github.com/devigned/az-roadshow-oss.git'
+ siteName:
+ value: devopscleweb
+ hostingPlanName:
+ value: someplan
+ siteLocation:
+ value: westus
+ sku:
+ value: Standard
+ template_link: 'https://raw.githubusercontent.com/azure/azure-quickstart-templates/master/201-web-app-github-deploy/azuredeploy.json'
+
+# Create or update a template deployment based on an inline template and parameters
+- name: Create Azure Deploy
+ azure_rm_deployment:
+ state: present
+ subscription_id: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ resource_group_name: dev-ops-cle
+
+ template:
+ $schema: "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#"
+ contentVersion: "1.0.0.0"
+ parameters:
+ newStorageAccountName:
+ type: "string"
+ metadata:
+ description: "Unique DNS Name for the Storage Account where the Virtual Machine's disks will be placed."
+ adminUsername:
+ type: "string"
+ metadata:
+ description: "User name for the Virtual Machine."
+ adminPassword:
+ type: "securestring"
+ metadata:
+ description: "Password for the Virtual Machine."
+ dnsNameForPublicIP:
+ type: "string"
+ metadata:
+ description: "Unique DNS Name for the Public IP used to access the Virtual Machine."
+ ubuntuOSVersion:
+ type: "string"
+ defaultValue: "14.04.2-LTS"
+ allowedValues:
+ - "12.04.5-LTS"
+ - "14.04.2-LTS"
+ - "15.04"
+ metadata:
+ description: "The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version. Allowed values: 12.04.5-LTS, 14.04.2-LTS, 15.04."
+ variables:
+ location: "West US"
+ imagePublisher: "Canonical"
+ imageOffer: "UbuntuServer"
+ OSDiskName: "osdiskforlinuxsimple"
+ nicName: "myVMNic"
+ addressPrefix: "192.0.2.0/24"
+ subnetName: "Subnet"
+ subnetPrefix: "10.0.0.0/24"
+ storageAccountType: "Standard_LRS"
+ publicIPAddressName: "myPublicIP"
+ publicIPAddressType: "Dynamic"
+ vmStorageAccountContainerName: "vhds"
+ vmName: "MyUbuntuVM"
+ vmSize: "Standard_D1"
+ virtualNetworkName: "MyVNET"
+ vnetID: "[resourceId('Microsoft.Network/virtualNetworks',variables('virtualNetworkName'))]"
+ subnetRef: "[concat(variables('vnetID'),'/subnets/',variables('subnetName'))]"
+ resources:
+ - type: "Microsoft.Storage/storageAccounts"
+ name: "[parameters('newStorageAccountName')]"
+ apiVersion: "2015-05-01-preview"
+ location: "[variables('location')]"
+ properties:
+ accountType: "[variables('storageAccountType')]"
+ - apiVersion: "2015-05-01-preview"
+ type: "Microsoft.Network/publicIPAddresses"
+ name: "[variables('publicIPAddressName')]"
+ location: "[variables('location')]"
+ properties:
+ publicIPAllocationMethod: "[variables('publicIPAddressType')]"
+ dnsSettings:
+ domainNameLabel: "[parameters('dnsNameForPublicIP')]"
+ - type: "Microsoft.Network/virtualNetworks"
+ apiVersion: "2015-05-01-preview"
+ name: "[variables('virtualNetworkName')]"
+ location: "[variables('location')]"
+ properties:
+ addressSpace:
+ addressPrefixes:
+ - "[variables('addressPrefix')]"
+ subnets:
+ -
+ name: "[variables('subnetName')]"
+ properties:
+ addressPrefix: "[variables('subnetPrefix')]"
+ - type: "Microsoft.Network/networkInterfaces"
+ apiVersion: "2015-05-01-preview"
+ name: "[variables('nicName')]"
+ location: "[variables('location')]"
+ dependsOn:
+ - "[concat('Microsoft.Network/publicIPAddresses/', variables('publicIPAddressName'))]"
+ - "[concat('Microsoft.Network/virtualNetworks/', variables('virtualNetworkName'))]"
+ properties:
+ ipConfigurations:
+ -
+ name: "ipconfig1"
+ properties:
+ privateIPAllocationMethod: "Dynamic"
+ publicIPAddress:
+ id: "[resourceId('Microsoft.Network/publicIPAddresses',variables('publicIPAddressName'))]"
+ subnet:
+ id: "[variables('subnetRef')]"
+ - type: "Microsoft.Compute/virtualMachines"
+ apiVersion: "2015-06-15"
+ name: "[variables('vmName')]"
+ location: "[variables('location')]"
+ dependsOn:
+ - "[concat('Microsoft.Storage/storageAccounts/', parameters('newStorageAccountName'))]"
+ - "[concat('Microsoft.Network/networkInterfaces/', variables('nicName'))]"
+ properties:
+ hardwareProfile:
+ vmSize: "[variables('vmSize')]"
+ osProfile:
+ computername: "[variables('vmName')]"
+ adminUsername: "[parameters('adminUsername')]"
+ adminPassword: "[parameters('adminPassword')]"
+ storageProfile:
+ imageReference:
+ publisher: "[variables('imagePublisher')]"
+ offer: "[variables('imageOffer')]"
+ sku: "[parameters('ubuntuOSVersion')]"
+ version: "latest"
+ osDisk:
+ name: "osdisk"
+ vhd:
+ uri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net/',variables('vmStorageAccountContainerName'),'/',variables('OSDiskName'),'.vhd')]"
+ caching: "ReadWrite"
+ createOption: "FromImage"
+ networkProfile:
+ networkInterfaces:
+ -
+ id: "[resourceId('Microsoft.Network/networkInterfaces',variables('nicName'))]"
+ diagnosticsProfile:
+ bootDiagnostics:
+ enabled: "true"
+ storageUri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net')]"
+ parameters:
+ newStorageAccountName:
+ value: devopsclestorage
+ adminUsername:
+ value: devopscle
+ adminPassword:
+ value: Password1!
+ dnsNameForPublicIP:
+ value: devopscleazure
+'''
+
+RETURN = '''
+deployment:
+ description: Deployment details
+ type: dict
+ returned: always
+ sample:
+ group_name:
+ description: Name of the resource group
+ type: string
+ returned: always
+ id:
+ description: The Azure ID of the deployment
+ type: string
+ returned: always
+ instances:
+ description: Provides the public IP addresses for each VM instance.
+ type: list
+ returned: always
+ name:
+ description: Name of the deployment
+ type: string
+ returned: always
+ outputs:
+ description: Dictionary of outputs received from the deployment
+ type: dict
+ returned: always
+'''
+
# Record why the optional prerequisites failed to import so exec_module can
# report it; None means the imports succeeded.
PREREQ_IMPORT_ERROR = None

try:
    import time
    import yaml
except ImportError as exc:
    # Bug fix: this previously assigned to IMPORT_ERROR, which left
    # PREREQ_IMPORT_ERROR permanently None and made the prerequisite check
    # in exec_module dead code.
    PREREQ_IMPORT_ERROR = "Error importing module prerequisites: %s" % exc
+
+from ansible.module_utils.azure_rm_common import *
+
+try:
+ from itertools import chain
+ from azure.common.credentials import ServicePrincipalCredentials
+ from azure.common.exceptions import CloudError
+ from azure.mgmt.resource.resources.models import (DeploymentProperties,
+ ParametersLink,
+ TemplateLink,
+ Deployment,
+ ResourceGroup,
+ Dependency)
+ from azure.mgmt.resource.resources import ResourceManagementClient
+ from azure.mgmt.network import NetworkManagementClient
+
+except ImportError:
+ # This is handled in azure_rm_common
+ pass
+
+
class AzureRMDeploymentManager(AzureRMModuleBase):
    """Create or destroy an Azure Resource Manager template deployment."""

    def __init__(self):

        self.module_arg_spec = dict(
            resource_group_name=dict(type='str', required=True, aliases=['resource_group']),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            template=dict(type='dict', default=None),
            parameters=dict(type='dict', default=None),
            template_link=dict(type='str', default=None),
            parameters_link=dict(type='str', default=None),
            location=dict(type='str', default="westus"),
            deployment_mode=dict(type='str', default='incremental', choices=['complete', 'incremental']),
            deployment_name=dict(type='str', default="ansible-arm"),
            wait_for_deployment_completion=dict(type='bool', default=True),
            wait_for_deployment_polling_period=dict(type='int', default=10)
        )

        # Inline bodies and URI links are alternative ways to supply the same input.
        mutually_exclusive = [('template', 'template_link'),
                              ('parameters', 'parameters_link')]

        self.resource_group_name = None
        self.state = None
        self.template = None
        self.parameters = None
        self.template_link = None
        self.parameters_link = None
        self.location = None
        self.deployment_mode = None
        self.deployment_name = None
        self.wait_for_deployment_completion = None
        self.wait_for_deployment_polling_period = None
        self.tags = None

        self.results = dict(
            deployment=dict(),
            changed=False,
            msg=""
        )

        super(AzureRMDeploymentManager, self).__init__(derived_arg_spec=self.module_arg_spec,
                                                       mutually_exclusive=mutually_exclusive,
                                                       supports_check_mode=False)

    def exec_module(self, **kwargs):
        """Run the module: deploy the template (state=present) or delete the
        resource group (state=absent) and return the results dict."""

        if PREREQ_IMPORT_ERROR:
            self.fail(PREREQ_IMPORT_ERROR)

        # list() keeps this working on Python 3, where dict views cannot be
        # concatenated with '+'; identical behavior on Python 2.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            setattr(self, key, kwargs[key])

        if self.state == 'present':
            deployment = self.deploy_template()
            self.results['deployment'] = dict(
                name=deployment.name,
                group_name=self.resource_group_name,
                id=deployment.id,
                outputs=deployment.properties.outputs,
                instances=self._get_instances(deployment)
            )
            self.results['changed'] = True
            self.results['msg'] = 'deployment succeeded'
        else:
            if self.resource_group_exists(self.resource_group_name):
                self.destroy_resource_group()
                self.results['changed'] = True
                self.results['msg'] = "deployment deleted"

        return self.results

    def deploy_template(self):
        """
        Deploy the targeted template and parameters, creating the resource
        group if needed, and return the deployment result object. Fails the
        module (with the failed operations attached) on any deployment error.
        """

        deploy_parameter = DeploymentProperties(self.deployment_mode)
        if not self.parameters_link:
            deploy_parameter.parameters = self.parameters
        else:
            deploy_parameter.parameters_link = ParametersLink(
                uri=self.parameters_link
            )
        if not self.template_link:
            deploy_parameter.template = self.template
        else:
            deploy_parameter.template_link = TemplateLink(
                uri=self.template_link
            )

        params = ResourceGroup(location=self.location, tags=self.tags)

        try:
            self.rm_client.resource_groups.create_or_update(self.resource_group_name, params)
        except CloudError as exc:
            self.fail("Resource group create_or_update failed with status code: %s and message: %s" %
                      (exc.status_code, exc.message))
        try:
            result = self.rm_client.deployments.create_or_update(self.resource_group_name,
                                                                 self.deployment_name,
                                                                 deploy_parameter)

            deployment_result = self.get_poller_result(result)
            if self.wait_for_deployment_completion:
                # Poll until the deployment reaches a terminal provisioning state.
                while deployment_result.properties is None or deployment_result.properties.provisioning_state not in ['Canceled', 'Failed', 'Deleted',
                                                                                                                      'Succeeded']:
                    time.sleep(self.wait_for_deployment_polling_period)
                    deployment_result = self.rm_client.deployments.get(self.resource_group_name, self.deployment_name)
        except CloudError as exc:
            failed_deployment_operations = self._get_failed_deployment_operations(self.deployment_name)
            self.log("Deployment failed %s: %s" % (exc.status_code, exc.message))
            self.fail("Deployment failed with status code: %s and message: %s" % (exc.status_code, exc.message),
                      failed_deployment_operations=failed_deployment_operations)

        if self.wait_for_deployment_completion and deployment_result.properties.provisioning_state != 'Succeeded':
            self.log("provisioning state: %s" % deployment_result.properties.provisioning_state)
            failed_deployment_operations = self._get_failed_deployment_operations(self.deployment_name)
            self.fail('Deployment failed. Deployment id: %s' % deployment_result.id,
                      failed_deployment_operations=failed_deployment_operations)

        return deployment_result

    def destroy_resource_group(self):
        """
        Destroy the targeted resource group. A 404/204 (already gone) is
        treated as success.
        """
        try:
            result = self.rm_client.resource_groups.delete(self.resource_group_name)
            result.wait()  # Blocking wait till the delete is finished
        except CloudError as e:
            if e.status_code == 404 or e.status_code == 204:
                return
            else:
                self.fail("Delete resource group and deploy failed with status code: %s and message: %s" %
                          (e.status_code, e.message))

    def resource_group_exists(self, resource_group):
        '''
        Return True/False based on existence of requested resource group.

        :param resource_group: string. Name of a resource group.
        :return: boolean
        '''
        try:
            self.rm_client.resource_groups.get(resource_group)
        except CloudError:
            return False
        return True

    def _get_failed_nested_operations(self, current_operations):
        """Recursively collect failed operations, descending into nested
        'Microsoft.Resources/deployments' operations."""
        new_operations = []
        for operation in current_operations:
            if operation.properties.provisioning_state == 'Failed':
                new_operations.append(operation)
                if operation.properties.target_resource and \
                   'Microsoft.Resources/deployments' in operation.properties.target_resource.id:
                    nested_deployment = operation.properties.target_resource.resource_name
                    try:
                        nested_operations = self.rm_client.deployment_operations.list(self.resource_group_name,
                                                                                     nested_deployment)
                    except CloudError as exc:
                        # Bug fix: this previously referenced an undefined
                        # name 'e' and raised NameError instead of failing
                        # with the real CloudError details.
                        self.fail("List nested deployment operations failed with status code: %s and message: %s" %
                                  (exc.status_code, exc.message))
                    new_nested_operations = self._get_failed_nested_operations(nested_operations)
                    new_operations += new_nested_operations
        return new_operations

    def _get_failed_deployment_operations(self, deployment_name):
        """Return a list of dicts describing the failed operations of the
        named deployment (best effort; an error while formatting yields [])."""
        results = []
        # time.sleep(15) # there is a race condition between when we ask for deployment status and when the
        #               # status is available.

        try:
            operations = self.rm_client.deployment_operations.list(self.resource_group_name, deployment_name)
        except CloudError as exc:
            self.fail("Get deployment failed with status code: %s and message: %s" %
                      (exc.status_code, exc.message))
        try:
            results = [
                dict(
                    id=op.id,
                    operation_id=op.operation_id,
                    status_code=op.properties.status_code,
                    status_message=op.properties.status_message,
                    target_resource=dict(
                        id=op.properties.target_resource.id,
                        resource_name=op.properties.target_resource.resource_name,
                        resource_type=op.properties.target_resource.resource_type
                    ) if op.properties.target_resource else None,
                    provisioning_state=op.properties.provisioning_state,
                )
                for op in self._get_failed_nested_operations(operations)
            ]
        except:
            # If we fail here, the original error gets lost and user receives wrong error message/stacktrace
            # NOTE(review): intentionally broad so a secondary failure cannot
            # mask the original deployment error.
            pass
        self.log(dict(failed_deployment_operations=results), pretty_print=True)
        return results

    def _get_instances(self, deployment):
        """Return [{vm_name, ips}] for every deployed VM that has at least
        one public IP, derived from the deployment's dependency graph."""
        dep_tree = self._build_hierarchy(deployment.properties.dependencies)
        vms = self._get_dependencies(dep_tree, resource_type="Microsoft.Compute/virtualMachines")
        vms_and_nics = [(vm, self._get_dependencies(vm['children'], "Microsoft.Network/networkInterfaces"))
                        for vm in vms]
        vms_and_ips = [(vm['dep'], self._nic_to_public_ips_instance(nics))
                       for vm, nics in vms_and_nics]
        return [dict(vm_name=vm.resource_name, ips=[self._get_ip_dict(ip)
                                                    for ip in ips]) for vm, ips in vms_and_ips if len(ips) > 0]

    def _get_dependencies(self, dep_tree, resource_type):
        """Collect every node of the given resource type from the tree,
        including nested children."""
        matches = [value for value in dep_tree.values() if value['dep'].resource_type == resource_type]
        for child_tree in [value['children'] for value in dep_tree.values()]:
            matches += self._get_dependencies(child_tree, resource_type)
        return matches

    def _build_hierarchy(self, dependencies, tree=None):
        """Build a nested {resource_name: {dep, children}} tree from the flat
        dependency list; at the top level, re-parent nodes under their
        dependents so only roots remain."""
        tree = dict(top=True) if tree is None else tree
        for dep in dependencies:
            if dep.resource_name not in tree:
                tree[dep.resource_name] = dict(dep=dep, children=dict())
            if isinstance(dep, Dependency) and dep.depends_on is not None and len(dep.depends_on) > 0:
                self._build_hierarchy(dep.depends_on, tree[dep.resource_name]['children'])

        if 'top' in tree:
            tree.pop('top', None)
            keys = list(tree.keys())
            for key1 in keys:
                for key2 in keys:
                    if key2 in tree and key1 in tree[key2]['children'] and key1 in tree:
                        tree[key2]['children'][key1] = tree[key1]
                        tree.pop(key1)
        return tree

    def _get_ip_dict(self, ip):
        """Flatten a PublicIPAddress object into a plain dict for the results."""
        ip_dict = dict(name=ip.name,
                       id=ip.id,
                       public_ip=ip.ip_address,
                       public_ip_allocation_method=str(ip.public_ip_allocation_method)
                       )
        if ip.dns_settings:
            ip_dict['dns_settings'] = {
                'domain_name_label': ip.dns_settings.domain_name_label,
                'fqdn': ip.dns_settings.fqdn
            }
        return ip_dict

    def _nic_to_public_ips_instance(self, nics):
        """Resolve the public IP objects attached to the given NIC dependency
        nodes (the resource group and IP name are parsed from the IP's id)."""
        return [self.network_client.public_ip_addresses.get(public_ip_id.split('/')[4], public_ip_id.split('/')[-1])
                for nic_obj in [self.network_client.network_interfaces.get(self.resource_group_name,
                                                                           nic['dep'].resource_name) for nic in nics]
                for public_ip_id in [ip_conf_instance.public_ip_address.id
                                     for ip_conf_instance in nic_obj.ip_configurations
                                     if ip_conf_instance.public_ip_address]]
+
+
def main():
    """Module entry point: instantiating the manager runs the deployment."""
    AzureRMDeploymentManager()
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
+
diff --git a/lib/ansible/modules/cloud/centurylink/clc_aa_policy.py b/lib/ansible/modules/cloud/centurylink/clc_aa_policy.py
new file mode 100644
index 0000000000..8693f4c774
--- /dev/null
+++ b/lib/ansible/modules/cloud/centurylink/clc_aa_policy.py
@@ -0,0 +1,360 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: clc_aa_policy
+short_description: Create or Delete Anti Affinity Policies at CenturyLink Cloud.
+description:
+ - An Ansible module to Create or Delete Anti Affinity Policies at CenturyLink Cloud.
+version_added: "2.0"
+options:
+ name:
+ description:
+ - The name of the Anti Affinity Policy.
+ required: True
+ location:
+ description:
+ - Datacenter in which the policy lives/should live.
+ required: True
+ state:
+ description:
+ - Whether to create or delete the policy.
+ required: False
+ default: present
+ choices: ['present','absent']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ default: True
+ required: False
+ choices: [True, False]
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
+
+---
+- name: Create AA Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create an Anti Affinity Policy
+ clc_aa_policy:
+ name: Hammer Time
+ location: UK3
+ state: present
+ register: policy
+
+ - name: debug
+ debug:
+ var: policy
+
+---
+- name: Delete AA Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete an Anti Affinity Policy
+ clc_aa_policy:
+ name: Hammer Time
+ location: UK3
+ state: absent
+ register: policy
+
+ - name: debug
+ debug:
+ var: policy
+'''
+
+RETURN = '''
+policy:
+ description: The anti affinity policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "id":"1a28dd0988984d87b9cd61fa8da15424",
+ "name":"test_aa_policy",
+ "location":"UC1",
+ "links":[
+ {
+ "rel":"self",
+ "href":"/v2/antiAffinityPolicies/wfad/1a28dd0988984d87b9cd61fa8da15424",
+ "verbs":[
+ "GET",
+ "DELETE",
+ "PUT"
+ ]
+ },
+ {
+ "rel":"location",
+ "href":"/v2/datacenters/wfad/UC1",
+ "id":"uc1",
+ "name":"UC1 - US West (Santa Clara)"
+ }
+ ]
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+class ClcAntiAffinityPolicy:
+
+ clc = clc_sdk
+ module = None
+
def __init__(self, module):
    """
    Construct module

    Fails fast (via fail_json) when the clc-sdk or a recent enough
    requests library is not importable, since nothing else can work
    without them.

    :param module: the AnsibleModule driving this run
    """
    self.module = module
    # name -> policy object cache, filled by _get_policies_for_datacenter.
    self.policy_dict = {}

    if not CLC_FOUND:
        self.module.fail_json(
            msg='clc-python-sdk required for this module')
    if not REQUESTS_FOUND:
        self.module.fail_json(
            msg='requests library is required for this module')
    if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
        self.module.fail_json(
            msg='requests library version should be >= 2.5.0')

    # Tag outgoing SDK HTTP traffic with this module's identity.
    self._set_user_agent(self.clc)
+
@staticmethod
def _define_module_argument_spec():
    """
    Define the argument spec for the ansible module
    :return: argument spec dictionary
    """
    argument_spec = dict(
        name=dict(required=True),
        location=dict(required=True),
        # type='bool' makes Ansible coerce YAML strings such as "False"
        # to a real boolean; without it any non-empty string was truthy.
        wait=dict(type='bool', default=True),
        state=dict(default='present', choices=['present', 'absent']),
    )
    return argument_spec
+
# Module Behavior Goodness
def process_request(self):
    """
    Process the request - Main Code Path
    :return: Returns with either an exit_json or fail_json
    """
    p = self.module.params

    self._set_clc_credentials_from_env()
    # Cache every existing policy for the datacenter up front so the
    # ensure_* paths can check existence without further API calls.
    self.policy_dict = self._get_policies_for_datacenter(p)

    if p['state'] == "absent":
        changed, policy = self._ensure_policy_is_absent(p)
    else:
        changed, policy = self._ensure_policy_is_present(p)

    # Normalise the SDK policy object into a plain dict for exit_json.
    if hasattr(policy, 'data'):
        policy = policy.data
    elif hasattr(policy, '__dict__'):
        policy = policy.__dict__

    self.module.exit_json(changed=changed, policy=policy)
+
def _set_clc_credentials_from_env(self):
    """
    Set the CLC Credentials on the sdk by reading environment variables

    Precedence: an API token plus account alias wins over a
    username/password pair. The error message below only names the
    username/password variables, which is the documented primary path.
    :return: none
    """
    env = os.environ
    v2_api_token = env.get('CLC_V2_API_TOKEN', False)
    v2_api_username = env.get('CLC_V2_API_USERNAME', False)
    v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
    clc_alias = env.get('CLC_ACCT_ALIAS', False)
    api_url = env.get('CLC_V2_API_URL', False)

    if api_url:
        self.clc.defaults.ENDPOINT_URL_V2 = api_url

    if v2_api_token and clc_alias:
        # Private SDK attributes -- this mirrors what SetCredentials does
        # internally when a pre-issued token is supplied.
        self.clc._LOGIN_TOKEN_V2 = v2_api_token
        self.clc._V2_ENABLED = True
        self.clc.ALIAS = clc_alias
    elif v2_api_username and v2_api_passwd:
        self.clc.v2.SetCredentials(
            api_username=v2_api_username,
            api_passwd=v2_api_passwd)
    else:
        return self.module.fail_json(
            msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                "environment variables")
+
def _get_policies_for_datacenter(self, p):
    """
    Get the Policies for a datacenter by calling the CLC API.
    :param p: module params dict holding the target 'location'
    :return: mapping of policy name -> policy object for that datacenter
    """
    return {policy.name: policy
            for policy in self.clc.v2.AntiAffinity.GetAll(location=p['location'])}
+
def _create_policy(self, p):
    """
    Create an Anti Affinity Policy using the CLC API.
    :param p: module params dict with the policy 'name' and 'location'
    :return: the created policy object from the CLC SDK, or fail_json on error
    """
    try:
        return self.clc.v2.AntiAffinity.Create(
            name=p['name'],
            location=p['location'])
    except CLCException as ex:
        # fail_json ends the module run; nothing is returned on this path.
        self.module.fail_json(msg='Failed to create anti affinity policy : {0}. {1}'.format(
            p['name'], ex.response_text
        ))
+
def _delete_policy(self, p):
    """
    Delete an Anti Affinity Policy using the CLC API.
    :param p: module params dict with the policy 'name' (must already be
        present in self.policy_dict -- callers check existence first)
    :return: none
    """
    try:
        policy = self.policy_dict[p['name']]
        policy.Delete()
    except CLCException as ex:
        self.module.fail_json(msg='Failed to delete anti affinity policy : {0}. {1}'.format(
            p['name'], ex.response_text
        ))
+
def _policy_exists(self, policy_name):
    """
    Look up an Anti Affinity Policy in the cached policy dict.

    Note: despite the name this does not return a plain boolean -- when
    the policy exists it returns the cached policy object (truthy), which
    _ensure_policy_is_present relies on; otherwise it returns False.
    (The previous docstring wrongly claimed a boolean return; the double
    dict lookup is also collapsed into a single .get call.)

    :param policy_name: name of the policy
    :return: the policy object if it exists, otherwise False
    """
    return self.policy_dict.get(policy_name, False)
+
def _ensure_policy_is_absent(self, p):
    """
    Makes sure that a policy is absent
    :param p: module params dict containing the policy 'name'
    :return: tuple (changed, None); changed is True when a deletion
             happened (or would happen in check mode)
    """
    if not self._policy_exists(policy_name=p['name']):
        return False, None
    # Policy exists: report a change, but only touch the API for real runs.
    if not self.module.check_mode:
        self._delete_policy(p)
    return True, None
+
def _ensure_policy_is_present(self, p):
    """
    Ensures that a policy is present
    :param p: module params dict containing the policy 'name'
    :return: tuple (changed, policy); the existing policy when nothing was
             done, the newly created one otherwise (None in check mode)
    """
    existing = self._policy_exists(policy_name=p['name'])
    if existing:
        return False, existing
    created = None
    if not self.module.check_mode:
        created = self._create_policy(p)
    return True, created
+
@staticmethod
def _set_user_agent(clc):
    # Tag SDK HTTP traffic so CLC can attribute API calls to this Ansible
    # module. Older clc-sdk versions lack SetRequestsSession, hence the
    # hasattr guard.
    if hasattr(clc, 'SetRequestsSession'):
        agent_string = "ClcAnsibleModule/" + __version__
        ses = requests.Session()
        ses.headers.update({"Api-Client": agent_string})
        # requests sets a default User-Agent header, so += is safe here.
        ses.headers['User-Agent'] += " " + agent_string
        clc.SetRequestsSession(ses)
+
+
def main():
    """
    The main function. Instantiates the module and calls process_request.
    :return: none
    """
    # supports_check_mode=True: the ensure_* methods skip API writes when
    # check mode is active but still report 'changed' accurately.
    module = AnsibleModule(
        argument_spec=ClcAntiAffinityPolicy._define_module_argument_spec(),
        supports_check_mode=True)
    clc_aa_policy = ClcAntiAffinityPolicy(module)
    clc_aa_policy.process_request()

if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/centurylink/clc_alert_policy.py b/lib/ansible/modules/cloud/centurylink/clc_alert_policy.py
new file mode 100644
index 0000000000..6e8c461854
--- /dev/null
+++ b/lib/ansible/modules/cloud/centurylink/clc_alert_policy.py
@@ -0,0 +1,541 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: clc_alert_policy
+short_description: Create or Delete Alert Policies at CenturyLink Cloud.
+description:
+ - An Ansible module to Create or Delete Alert Policies at CenturyLink Cloud.
+version_added: "2.0"
+options:
+ alias:
+ description:
+ - The alias of your CLC Account
+ required: True
+ name:
+ description:
+ - The name of the alert policy. This is mutually exclusive with id
+ required: False
+ default: None
+ id:
+ description:
+ - The alert policy id. This is mutually exclusive with name
+ required: False
+ default: None
+ alert_recipients:
+ description:
+ - A list of recipient email ids to notify the alert.
+ This is required for state 'present'
+ required: False
+ default: None
+ metric:
+ description:
+ - The metric on which to measure the condition that will trigger the alert.
+ This is required for state 'present'
+ required: False
+ default: None
+ choices: ['cpu','memory','disk']
+ duration:
+ description:
+ - The length of time in minutes that the condition must exceed the threshold.
+ This is required for state 'present'
+ required: False
+ default: None
+ threshold:
+ description:
+ - The threshold that will trigger the alert when the metric equals or exceeds it.
+ This is required for state 'present'
+ This number represents a percentage and must be a value between 5.0 - 95.0 that is a multiple of 5.0
+ required: False
+ default: None
+ state:
+ description:
+ - Whether to create or delete the policy.
+ required: False
+ default: present
+ choices: ['present','absent']
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
+
+---
+- name: Create Alert Policy Example
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create an Alert Policy for disk above 80% for 5 minutes
+ clc_alert_policy:
+ alias: wfad
+ name: 'alert for disk > 80%'
+ alert_recipients:
+ - test1@centurylink.com
+ - test2@centurylink.com
+ metric: 'disk'
+ duration: '00:05:00'
+ threshold: 80
+ state: present
+ register: policy
+
+ - name: debug
+ debug: var=policy
+
+---
+- name: Delete Alert Policy Example
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete an Alert Policy
+ clc_alert_policy:
+ alias: wfad
+ name: 'alert for disk > 80%'
+ state: absent
+ register: policy
+
+ - name: debug
+ debug: var=policy
+'''
+
+RETURN = '''
+policy:
+ description: The alert policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "actions": [
+ {
+ "action": "email",
+ "settings": {
+ "recipients": [
+ "user1@domain.com",
+ "user1@domain.com"
+ ]
+ }
+ }
+ ],
+ "id": "ba54ac54a60d4a4f1ed6d48c1ce240a7",
+ "links": [
+ {
+ "href": "/v2/alertPolicies/alias/ba54ac54a60d4a4fb1d6d48c1ce240a7",
+ "rel": "self",
+ "verbs": [
+ "GET",
+ "DELETE",
+ "PUT"
+ ]
+ }
+ ],
+ "name": "test_alert",
+ "triggers": [
+ {
+ "duration": "00:05:00",
+ "metric": "disk",
+ "threshold": 80.0
+ }
+ ]
+ }
+'''
+
+__version__ = '${version}'
+
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
+class ClcAlertPolicy:
+
+ clc = clc_sdk
+ module = None
+
def __init__(self, module):
    """
    Construct module

    Fails fast (via fail_json) when the clc-sdk or a recent enough
    requests library is not importable.

    :param module: the AnsibleModule driving this run
    """
    self.module = module
    # policy id -> policy dict cache, filled by _get_alert_policies.
    self.policy_dict = {}

    if not CLC_FOUND:
        self.module.fail_json(
            msg='clc-python-sdk required for this module')
    if not REQUESTS_FOUND:
        self.module.fail_json(
            msg='requests library is required for this module')
    if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
        self.module.fail_json(
            msg='requests library version should be >= 2.5.0')

    # Tag outgoing SDK HTTP traffic with this module's identity.
    self._set_user_agent(self.clc)
+
@staticmethod
def _define_module_argument_spec():
    """
    Define the argument spec for the ansible module
    :return: dict with 'argument_spec' and 'mutually_exclusive' entries,
        suitable for expansion into AnsibleModule(**...)
    """
    argument_spec = dict(
        name=dict(default=None),
        id=dict(default=None),
        # 'required=True' combined with 'default=None' was contradictory:
        # a required option can never fall back to a default.
        alias=dict(required=True),
        alert_recipients=dict(type='list', default=None),
        metric=dict(
            choices=[
                'cpu',
                'memory',
                'disk'],
            default=None),
        duration=dict(type='str', default=None),
        threshold=dict(type='int', default=None),
        state=dict(default='present', choices=['present', 'absent'])
    )
    mutually_exclusive = [
        ['name', 'id']
    ]
    return {'argument_spec': argument_spec,
            'mutually_exclusive': mutually_exclusive}
+
# Module Behavior Goodness
def process_request(self):
    """
    Process the request - Main Code Path
    :return: Returns with either an exit_json or fail_json
    """
    p = self.module.params

    self._set_clc_credentials_from_env()
    # Cache every alert policy for the alias up front; the ensure_* paths
    # consult this dict instead of making further GET calls.
    self.policy_dict = self._get_alert_policies(p['alias'])

    if p['state'] == 'present':
        changed, policy = self._ensure_alert_policy_is_present()
    else:
        changed, policy = self._ensure_alert_policy_is_absent()

    self.module.exit_json(changed=changed, policy=policy)
+
def _set_clc_credentials_from_env(self):
    """
    Set the CLC Credentials on the sdk by reading environment variables

    Precedence: an API token plus account alias wins over a
    username/password pair.
    NOTE(review): 'os' is not imported directly in this module; it is
    presumably supplied by the wildcard module_utils.basic import at the
    bottom of the file -- confirm.
    :return: none
    """
    env = os.environ
    v2_api_token = env.get('CLC_V2_API_TOKEN', False)
    v2_api_username = env.get('CLC_V2_API_USERNAME', False)
    v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
    clc_alias = env.get('CLC_ACCT_ALIAS', False)
    api_url = env.get('CLC_V2_API_URL', False)

    if api_url:
        self.clc.defaults.ENDPOINT_URL_V2 = api_url

    if v2_api_token and clc_alias:
        # Private SDK attributes -- mirrors SetCredentials' internals for
        # the pre-issued-token path.
        self.clc._LOGIN_TOKEN_V2 = v2_api_token
        self.clc._V2_ENABLED = True
        self.clc.ALIAS = clc_alias
    elif v2_api_username and v2_api_passwd:
        self.clc.v2.SetCredentials(
            api_username=v2_api_username,
            api_passwd=v2_api_passwd)
    else:
        return self.module.fail_json(
            msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                "environment variables")
+
def _ensure_alert_policy_is_present(self):
    """
    Ensures that the alert policy is present
    :return: (changed, policy)
        changed: A flag representing if anything is modified
        policy: the created/updated alert policy
    """
    changed = False
    p = self.module.params
    policy_name = p.get('name')

    if not policy_name:
        # Fixed the error-message grammar ('Policy name is a required').
        self.module.fail_json(msg='Policy name is required')
    policy = self._alert_policy_exists(policy_name)
    if not policy:
        changed = True
        policy = None
        if not self.module.check_mode:
            policy = self._create_alert_policy()
    else:
        # Policy exists -- update it in place if any configured field differs.
        changed_u, policy = self._ensure_alert_policy_is_updated(policy)
        if changed_u:
            changed = True
    return changed, policy
+
def _ensure_alert_policy_is_absent(self):
    """
    Ensures that the alert policy is absent
    :return: (changed, None)
        changed: A flag representing if anything is modified
    """
    params = self.module.params
    policy_id = params.get('id')
    policy_name = params.get('name')
    account_alias = params.get('alias')

    if not policy_id and not policy_name:
        self.module.fail_json(
            msg='Either alert policy id or policy name is required')
    if not policy_id and policy_name:
        # Resolve the id from the name (fails on duplicate names).
        policy_id = self._get_alert_policy_id(
            self.module,
            policy_name)

    if not (policy_id and policy_id in self.policy_dict):
        return False, None
    if not self.module.check_mode:
        self._delete_alert_policy(account_alias, policy_id)
    return True, None
+
def _ensure_alert_policy_is_updated(self, alert_policy):
    """
    Ensures the alert policy is updated if anything is changed in the alert policy configuration
    :param alert_policy: the target alert policy (dict as returned by the API)
    :return: (changed, policy)
        changed: A flag representing if anything is modified
        policy: the updated the alert policy
    """
    changed = False
    p = self.module.params
    alert_policy_id = alert_policy.get('id')
    email_list = p.get('alert_recipients')
    metric = p.get('metric')
    duration = p.get('duration')
    threshold = p.get('threshold')
    policy = alert_policy
    # Only the first trigger/action entry is compared -- policies created
    # by this module always carry exactly one of each.
    if (metric and metric != str(alert_policy.get('triggers')[0].get('metric'))) or \
        (duration and duration != str(alert_policy.get('triggers')[0].get('duration'))) or \
            (threshold and float(threshold) != float(alert_policy.get('triggers')[0].get('threshold'))):
        changed = True
    elif email_list:
        # Recipients are compared as sets, so order changes alone are not
        # reported as a modification.
        t_email_list = list(
            alert_policy.get('actions')[0].get('settings').get('recipients'))
        if set(email_list) != set(t_email_list):
            changed = True
    if changed and not self.module.check_mode:
        # The PUT re-sends the whole policy from module params.
        policy = self._update_alert_policy(alert_policy_id)
    return changed, policy
+
def _get_alert_policies(self, alias):
    """
    Get the alert policies for account alias by calling the CLC API.
    :param alias: the account alias
    :return: dict mapping policy id -> policy dict for the account alias
    """
    raw = self.clc.v2.API.Call('GET',
                               '/v2/alertPolicies/%s'
                               % alias)
    return {item.get('id'): item for item in raw.get('items')}
+
def _create_alert_policy(self):
    """
    Create an alert Policy using the CLC API.
    :return: response dictionary from the CLC API.
    """
    p = self.module.params
    alias = p['alias']
    email_list = p['alert_recipients']
    metric = p['metric']
    duration = p['duration']
    threshold = p['threshold']
    policy_name = p['name']
    # One email action + one trigger, mirroring what
    # _ensure_alert_policy_is_updated later compares against.
    arguments = json.dumps(
        {
            'name': policy_name,
            'actions': [{
                'action': 'email',
                'settings': {
                    'recipients': email_list
                }
            }],
            'triggers': [{
                'metric': metric,
                'duration': duration,
                'threshold': threshold
            }]
        }
    )
    try:
        result = self.clc.v2.API.Call(
            'POST',
            '/v2/alertPolicies/%s' % alias,
            arguments)
    except APIFailedResponse as e:
        return self.module.fail_json(
            msg='Unable to create alert policy "{0}". {1}'.format(
                policy_name, str(e.response_text)))
    return result
+
def _update_alert_policy(self, alert_policy_id):
    """
    Update alert policy using the CLC API.

    Builds the same full payload as _create_alert_policy (the API's PUT
    replaces the policy wholesale rather than patching fields).
    :param alert_policy_id: The clc alert policy id
    :return: response dictionary from the CLC API.
    """
    p = self.module.params
    alias = p['alias']
    email_list = p['alert_recipients']
    metric = p['metric']
    duration = p['duration']
    threshold = p['threshold']
    policy_name = p['name']
    arguments = json.dumps(
        {
            'name': policy_name,
            'actions': [{
                'action': 'email',
                'settings': {
                    'recipients': email_list
                }
            }],
            'triggers': [{
                'metric': metric,
                'duration': duration,
                'threshold': threshold
            }]
        }
    )
    try:
        result = self.clc.v2.API.Call(
            'PUT', '/v2/alertPolicies/%s/%s' %
            (alias, alert_policy_id), arguments)
    except APIFailedResponse as e:
        return self.module.fail_json(
            msg='Unable to update alert policy "{0}". {1}'.format(
                policy_name, str(e.response_text)))
    return result
+
def _delete_alert_policy(self, alias, policy_id):
    """
    Delete an alert policy using the CLC API.
    :param alias : the account alias
    :param policy_id: the alert policy id
    :return: response dictionary from the CLC API.
    """
    try:
        result = self.clc.v2.API.Call(
            'DELETE', '/v2/alertPolicies/%s/%s' %
            (alias, policy_id), None)
    except APIFailedResponse as e:
        # fail_json ends the module run; nothing is returned on this path.
        return self.module.fail_json(
            msg='Unable to delete alert policy id "{0}". {1}'.format(
                policy_id, str(e.response_text)))
    return result
+
def _alert_policy_exists(self, policy_name):
    """
    Look up an alert policy by name in the cached policy dict.

    Note: despite the name this returns the policy dict itself (truthy)
    rather than True; the loop does not break, so when several policies
    share the name the LAST one scanned wins. Callers needing strict
    uniqueness use _get_alert_policy_id instead.

    :param policy_name: name of the alert policy
    :return: the matching policy dict if found, otherwise False
    """
    result = False
    for policy_id in self.policy_dict:
        if self.policy_dict.get(policy_id).get('name') == policy_name:
            result = self.policy_dict.get(policy_id)
    return result
+
def _get_alert_policy_id(self, module, alert_policy_name):
    """
    retrieves the alert policy id of the account based on the name of the policy
    :param module: the AnsibleModule object
    :param alert_policy_name: the alert policy name
    :return: the unique matching policy id (None if no match); fails the
             module when more than one policy carries the name
    """
    found_id = None
    for candidate_id in self.policy_dict:
        if self.policy_dict.get(candidate_id).get('name') != alert_policy_name:
            continue
        if found_id:
            return module.fail_json(
                msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
        found_id = candidate_id
    return found_id
+
@staticmethod
def _set_user_agent(clc):
    # Tag SDK HTTP traffic so CLC can attribute API calls to this Ansible
    # module; hasattr guards against older clc-sdk versions.
    if hasattr(clc, 'SetRequestsSession'):
        agent_string = "ClcAnsibleModule/" + __version__
        ses = requests.Session()
        ses.headers.update({"Api-Client": agent_string})
        # requests sets a default User-Agent header, so += is safe here.
        ses.headers['User-Agent'] += " " + agent_string
        clc.SetRequestsSession(ses)
+
+
def main():
    """
    The main function. Instantiates the module and calls process_request.
    :return: none
    """
    # argument_dict carries both 'argument_spec' and 'mutually_exclusive'.
    argument_dict = ClcAlertPolicy._define_module_argument_spec()
    module = AnsibleModule(supports_check_mode=True, **argument_dict)
    clc_alert_policy = ClcAlertPolicy(module)
    clc_alert_policy.process_request()

# Wildcard import kept at the bottom per the legacy Ansible module pattern;
# it also supplies names such as os/json used above.
from ansible.module_utils.basic import *  # pylint: disable=W0614
if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/centurylink/clc_blueprint_package.py b/lib/ansible/modules/cloud/centurylink/clc_blueprint_package.py
new file mode 100644
index 0000000000..8d4d28d20f
--- /dev/null
+++ b/lib/ansible/modules/cloud/centurylink/clc_blueprint_package.py
@@ -0,0 +1,306 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: clc_blueprint_package
+short_description: deploys a blue print package on a set of servers in CenturyLink Cloud.
+description:
+ - An Ansible module to deploy blue print package on a set of servers in CenturyLink Cloud.
+version_added: "2.0"
+options:
+ server_ids:
+ description:
+ - A list of server Ids to deploy the blue print package.
+ required: True
+ package_id:
+ description:
+ - The package id of the blue print.
+ required: True
+ package_params:
+ description:
+ - The dictionary of arguments required to deploy the blue print.
+ default: {}
+ required: False
+ state:
+ description:
+ - Whether to install or un-install the package. Currently it supports only "present" for install action.
+ required: False
+ default: present
+ choices: ['present']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ choices: [ True, False ]
+ default: True
+ required: False
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
+
+- name: Deploy package
+ clc_blueprint_package:
+ server_ids:
+ - UC1TEST-SERVER1
+ - UC1TEST-SERVER2
+ package_id: 77abb844-579d-478d-3955-c69ab4a7ba1a
+ package_params: {}
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SERVER1",
+ "UC1TEST-SERVER2"
+ ]
+'''
+
+__version__ = '${version}'
+
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
+class ClcBlueprintPackage:
+
+ clc = clc_sdk
+ module = None
+
def __init__(self, module):
    """
    Construct module

    Fails fast (via fail_json) when the clc-sdk or a recent enough
    requests library is not importable.

    :param module: the AnsibleModule driving this run
    """
    self.module = module
    if not CLC_FOUND:
        self.module.fail_json(
            msg='clc-python-sdk required for this module')
    if not REQUESTS_FOUND:
        self.module.fail_json(
            msg='requests library is required for this module')
    if requests.__version__ and LooseVersion(
            requests.__version__) < LooseVersion('2.5.0'):
        self.module.fail_json(
            msg='requests library version should be >= 2.5.0')

    # Tag outgoing SDK HTTP traffic with this module's identity.
    self._set_user_agent(self.clc)
+
def process_request(self):
    """
    Process the request - Main Code Path
    :return: Returns with either an exit_json or fail_json
    """
    p = self.module.params
    changed = False
    changed_server_ids = []
    self._set_clc_credentials_from_env()
    server_ids = p['server_ids']
    package_id = p['package_id']
    package_params = p['package_params']
    state = p['state']
    # 'present' is currently the only supported state (argument spec
    # choices); un-install is not implemented.
    if state == 'present':
        changed, changed_server_ids, request_list = self.ensure_package_installed(
            server_ids, package_id, package_params)
        self._wait_for_requests_to_complete(request_list)
    self.module.exit_json(changed=changed, server_ids=changed_server_ids)
+
@staticmethod
def define_argument_spec():
    """
    This function defines the dictionary object required for
    package module
    :return: the package dictionary object
    """
    argument_spec = dict(
        server_ids=dict(type='list', required=True),
        package_id=dict(required=True),
        package_params=dict(type='dict', default={}),
        # type='bool' so YAML strings like "False" are coerced to real
        # booleans; previously any non-empty string was treated as True
        # by _wait_for_requests_to_complete.
        wait=dict(type='bool', default=True),
        state=dict(default='present', choices=['present'])
    )
    return argument_spec
+
def ensure_package_installed(self, server_ids, package_id, package_params):
    """
    Ensure the package is installed in the given list of servers
    :param server_ids: the server list where the package needs to be installed
    :param package_id: the blueprint package id
    :param package_params: the package arguments
    :return: (changed, server_ids, request_list)
        changed: True when at least one target server was found
        server_ids: the server id list passed in
        request_list: CLC request objects (empty in check mode)
    """
    changed = False
    pending_requests = []
    target_servers = self._get_servers_from_clc(
        server_ids,
        'Failed to get servers from CLC')
    for target in target_servers:
        if not self.module.check_mode:
            pending_requests.append(
                self.clc_install_package(
                    target,
                    package_id,
                    package_params))
        changed = True
    return changed, server_ids, pending_requests
+
def clc_install_package(self, server, package_id, package_params):
    """
    Install the package to a given clc server
    :param server: The server object where the package needs to be installed
    :param package_id: The blue print package id
    :param package_params: the required argument dict for the package installation
    :return: The result object from the CLC API call
    """
    result = None
    try:
        result = server.ExecutePackage(
            package_id=package_id,
            parameters=package_params)
    except CLCException as ex:
        # NOTE(review): ex.message is presumably defined by the clc-sdk
        # exception class; plain str(ex) would be safer on Python 3 --
        # confirm before changing.
        self.module.fail_json(msg='Failed to install package : {0} to server {1}. {2}'.format(
            package_id, server.id, ex.message
        ))
    return result
+
def _wait_for_requests_to_complete(self, request_lst):
    """
    Waits until the CLC requests are complete if the wait argument is True
    :param request_lst: The list of CLC request objects
    :return: none (fails the module on the first unsuccessful sub-request)
    """
    if not self.module.params['wait']:
        return
    for pending in request_lst:
        pending.WaitUntilComplete()
        for detail in pending.requests:
            if detail.Status() != 'succeeded':
                self.module.fail_json(
                    msg='Unable to process package install request')
+
def _get_servers_from_clc(self, server_list, message):
    """
    Internal function to fetch list of CLC server objects from a list of server ids
    :param server_list: the list of server ids
    :param message: the error message to raise if there is any error
    :return the list of CLC server objects
    """
    try:
        return self.clc.v2.Servers(server_list).servers
    except CLCException as ex:
        # fail_json ends the module run; nothing is returned on this path.
        self.module.fail_json(msg=message + ': %s' % ex)
+
def _set_clc_credentials_from_env(self):
    """
    Set the CLC Credentials on the sdk by reading environment variables

    Precedence: an API token plus account alias wins over a
    username/password pair.
    NOTE(review): 'os' is not imported directly in this module; it is
    presumably supplied by the wildcard module_utils.basic import at the
    bottom of the file -- confirm.
    :return: none
    """
    env = os.environ
    v2_api_token = env.get('CLC_V2_API_TOKEN', False)
    v2_api_username = env.get('CLC_V2_API_USERNAME', False)
    v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
    clc_alias = env.get('CLC_ACCT_ALIAS', False)
    api_url = env.get('CLC_V2_API_URL', False)

    if api_url:
        self.clc.defaults.ENDPOINT_URL_V2 = api_url

    if v2_api_token and clc_alias:
        # Private SDK attributes -- mirrors SetCredentials' internals for
        # the pre-issued-token path.
        self.clc._LOGIN_TOKEN_V2 = v2_api_token
        self.clc._V2_ENABLED = True
        self.clc.ALIAS = clc_alias
    elif v2_api_username and v2_api_passwd:
        self.clc.v2.SetCredentials(
            api_username=v2_api_username,
            api_passwd=v2_api_passwd)
    else:
        return self.module.fail_json(
            msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                "environment variables")
+
@staticmethod
def _set_user_agent(clc):
    # Tag SDK HTTP traffic so CLC can attribute API calls to this Ansible
    # module; hasattr guards against older clc-sdk versions.
    if hasattr(clc, 'SetRequestsSession'):
        agent_string = "ClcAnsibleModule/" + __version__
        ses = requests.Session()
        ses.headers.update({"Api-Client": agent_string})
        # requests sets a default User-Agent header, so += is safe here.
        ses.headers['User-Agent'] += " " + agent_string
        clc.SetRequestsSession(ses)
+
+
def main():
    """
    Main function
    :return: None
    """
    # supports_check_mode=True: ensure_package_installed skips the API
    # writes in check mode but still reports 'changed'.
    module = AnsibleModule(
        argument_spec=ClcBlueprintPackage.define_argument_spec(),
        supports_check_mode=True
    )
    clc_blueprint_package = ClcBlueprintPackage(module)
    clc_blueprint_package.process_request()

# Wildcard import kept at the bottom per the legacy Ansible module pattern;
# it also supplies names such as os used above.
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/centurylink/clc_firewall_policy.py b/lib/ansible/modules/cloud/centurylink/clc_firewall_policy.py
new file mode 100644
index 0000000000..4ccfe171f2
--- /dev/null
+++ b/lib/ansible/modules/cloud/centurylink/clc_firewall_policy.py
@@ -0,0 +1,601 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: clc_firewall_policy
+short_description: Create/delete/update firewall policies
+description:
+ - Create or delete or update firewall polices on Centurylink Cloud
+version_added: "2.0"
+options:
+ location:
+ description:
+ - Target datacenter for the firewall policy
+ required: True
+ state:
+ description:
+ - Whether to create or delete the firewall policy
+ default: present
+ required: False
+ choices: ['present', 'absent']
+ source:
+ description:
+ - The list of source addresses for traffic on the originating firewall.
+        This is required when state is 'present'
+ default: None
+ required: False
+ destination:
+ description:
+ - The list of destination addresses for traffic on the terminating firewall.
+ This is required when state is 'present'
+ default: None
+ required: False
+ ports:
+ description:
+ - The list of ports associated with the policy.
+ TCP and UDP can take in single ports or port ranges.
+ default: None
+ required: False
+ choices: ['any', 'icmp', 'TCP/123', 'UDP/123', 'TCP/123-456', 'UDP/123-456']
+ firewall_policy_id:
+ description:
+ - Id of the firewall policy. This is required to update or delete an existing firewall policy
+ default: None
+ required: False
+ source_account_alias:
+ description:
+ - CLC alias for the source account
+ required: True
+ destination_account_alias:
+ description:
+ - CLC alias for the destination account
+ default: None
+ required: False
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ default: True
+ required: False
+ choices: [True, False]
+ enabled:
+ description:
+ - Whether the firewall policy is enabled or disabled
+ default: True
+ required: False
+ choices: [True, False]
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+---
+- name: Create Firewall Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+    - name: Create / Verify a Firewall Policy at CenturyLink Cloud
+ clc_firewall:
+ source_account_alias: WFAD
+ location: VA1
+ state: present
+ source: 10.128.216.0/24
+ destination: 10.128.216.0/24
+ ports: Any
+ destination_account_alias: WFAD
+
+---
+- name: Delete Firewall Policy
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+    - name: Delete a Firewall Policy at CenturyLink Cloud
+ clc_firewall:
+ source_account_alias: WFAD
+ location: VA1
+ state: absent
+ firewall_policy_id: c62105233d7a4231bd2e91b9c791e43e1
+'''
+
+RETURN = '''
+firewall_policy_id:
+    description: The firewall policy id
+ returned: success
+ type: string
+ sample: fc36f1bfd47242e488a9c44346438c05
+firewall_policy:
+    description: The firewall policy information
+ returned: success
+ type: dict
+ sample:
+ {
+ "destination":[
+ "10.1.1.0/24",
+ "10.2.2.0/24"
+ ],
+ "destinationAccount":"wfad",
+ "enabled":true,
+ "id":"fc36f1bfd47242e488a9c44346438c05",
+ "links":[
+ {
+ "href":"http://api.ctl.io/v2-experimental/firewallPolicies/wfad/uc1/fc36f1bfd47242e488a9c44346438c05",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "ports":[
+ "any"
+ ],
+ "source":[
+ "10.1.1.0/24",
+ "10.2.2.0/24"
+ ],
+ "status":"active"
+ }
+'''
+
+__version__ = '${version}'
+
import os
import urlparse
from distutils.version import LooseVersion
from time import sleep
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
class ClcFirewallPolicy:
    """
    Ansible helper that creates, updates and deletes shared firewall
    policies in CenturyLink Cloud through the v2-experimental API.
    """

    # Class-level handle to the clc-sdk module (rebound per instance).
    clc = None

    def __init__(self, module):
        """
        Construct module
        :param module: the AnsibleModule driving this request
        """
        self.clc = clc_sdk
        self.module = module
        self.firewall_dict = {}

        # Hard requirements: clc-sdk and a sufficiently recent requests.
        if not CLC_FOUND:
            self.module.fail_json(
                msg='clc-python-sdk required for this module')
        if not REQUESTS_FOUND:
            self.module.fail_json(
                msg='requests library is required for this module')
        if requests.__version__ and LooseVersion(
                requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library version should be >= 2.5.0')

        self._set_user_agent(self.clc)

    @staticmethod
    def _define_module_argument_spec():
        """
        Define the argument spec for the ansible module
        :return: argument spec dictionary
        """
        # Bug fix: 'defualt' was misspelled for source, destination and
        # enabled, so AnsibleModule never saw those defaults and 'enabled'
        # silently defaulted to None instead of the documented True.  The
        # contradictory default on the required source_account_alias is gone.
        argument_spec = dict(
            location=dict(required=True),
            source_account_alias=dict(required=True),
            destination_account_alias=dict(default=None),
            firewall_policy_id=dict(default=None),
            ports=dict(default=None, type='list'),
            source=dict(default=None, type='list'),
            destination=dict(default=None, type='list'),
            wait=dict(default=True),
            state=dict(default='present', choices=['present', 'absent']),
            enabled=dict(default=True, choices=[True, False])
        )
        return argument_spec

    def process_request(self):
        """
        Execute the main code path, and handle the request
        :return: none (exits the module via exit_json)
        """
        changed = False
        firewall_policy = None
        location = self.module.params.get('location')
        source_account_alias = self.module.params.get('source_account_alias')
        destination_account_alias = self.module.params.get(
            'destination_account_alias')
        firewall_policy_id = self.module.params.get('firewall_policy_id')
        ports = self.module.params.get('ports')
        source = self.module.params.get('source')
        destination = self.module.params.get('destination')
        wait = self.module.params.get('wait')
        state = self.module.params.get('state')
        enabled = self.module.params.get('enabled')

        self.firewall_dict = {
            'location': location,
            'source_account_alias': source_account_alias,
            'destination_account_alias': destination_account_alias,
            'firewall_policy_id': firewall_policy_id,
            'ports': ports,
            'source': source,
            'destination': destination,
            'wait': wait,
            'state': state,
            'enabled': enabled}

        self._set_clc_credentials_from_env()

        if state == 'absent':
            changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_absent(
                source_account_alias, location, self.firewall_dict)
        elif state == 'present':
            changed, firewall_policy_id, firewall_policy = self._ensure_firewall_policy_is_present(
                source_account_alias, location, self.firewall_dict)

        return self.module.exit_json(
            changed=changed,
            firewall_policy_id=firewall_policy_id,
            firewall_policy=firewall_policy)

    @staticmethod
    def _get_policy_id_from_response(response):
        """
        Method to parse out the policy id from creation response
        :param response: response from firewall creation API call
        :return: policy_id: firewall policy id from creation call
        """
        url = response.get('links')[0]['href']
        # The id is the last path segment of the policy's "self" link.
        # Pure string parsing replaces the old urlparse/os.path combo, which
        # depended on names only reachable via the bottom-of-file star import.
        path = url.split('?', 1)[0].split('#', 1)[0]
        return path.rstrip('/').rsplit('/', 1)[-1]

    def _set_clc_credentials_from_env(self):
        """
        Set the CLC Credentials on the sdk by reading environment variables.

        A token/alias pair (CLC_V2_API_TOKEN + CLC_ACCT_ALIAS) takes
        precedence over username/password; with neither pair the module fails.
        :return: none
        """
        env = os.environ
        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
        clc_alias = env.get('CLC_ACCT_ALIAS', False)
        api_url = env.get('CLC_V2_API_URL', False)

        # Optional endpoint override for non-production CLC environments.
        if api_url:
            self.clc.defaults.ENDPOINT_URL_V2 = api_url

        if v2_api_token and clc_alias:
            # Token-based auth pokes sdk-internal attributes directly.
            self.clc._LOGIN_TOKEN_V2 = v2_api_token
            self.clc._V2_ENABLED = True
            self.clc.ALIAS = clc_alias
        elif v2_api_username and v2_api_passwd:
            self.clc.v2.SetCredentials(
                api_username=v2_api_username,
                api_passwd=v2_api_passwd)
        else:
            return self.module.fail_json(
                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                    "environment variables")

    def _ensure_firewall_policy_is_present(
            self,
            source_account_alias,
            location,
            firewall_dict):
        """
        Ensures that a given firewall policy is present
        :param source_account_alias: the source account alias for the firewall policy
        :param location: datacenter of the firewall policy
        :param firewall_dict: dictionary of request parameters for firewall policy
        :return: (changed, firewall_policy_id, firewall_policy)
            changed: flag for if a change occurred
            firewall_policy_id: the firewall policy id that was created/updated
            firewall_policy: The firewall_policy object
        """
        firewall_policy = None
        firewall_policy_id = firewall_dict.get('firewall_policy_id')

        if firewall_policy_id is None:
            # No id supplied: create a brand new policy (skipped in check
            # mode, but still reported as a change).
            if not self.module.check_mode:
                response = self._create_firewall_policy(
                    source_account_alias,
                    location,
                    firewall_dict)
                firewall_policy_id = self._get_policy_id_from_response(
                    response)
            changed = True
        else:
            # Id supplied: update the existing policy only when the requested
            # settings differ from what the API currently reports.
            firewall_policy = self._get_firewall_policy(
                source_account_alias, location, firewall_policy_id)
            if not firewall_policy:
                return self.module.fail_json(
                    msg='Unable to find the firewall policy id : {0}'.format(
                        firewall_policy_id))
            changed = self._compare_get_request_with_dict(
                firewall_policy,
                firewall_dict)
            if not self.module.check_mode and changed:
                self._update_firewall_policy(
                    source_account_alias,
                    location,
                    firewall_policy_id,
                    firewall_dict)
        if changed and firewall_policy_id:
            firewall_policy = self._wait_for_requests_to_complete(
                source_account_alias,
                location,
                firewall_policy_id)
        return changed, firewall_policy_id, firewall_policy

    def _ensure_firewall_policy_is_absent(
            self,
            source_account_alias,
            location,
            firewall_dict):
        """
        Ensures that a given firewall policy is removed if present
        :param source_account_alias: the source account alias for the firewall policy
        :param location: datacenter of the firewall policy
        :param firewall_dict: firewall policy to delete
        :return: (changed, firewall_policy_id, response)
            changed: flag for if a change occurred
            firewall_policy_id: the firewall policy id that was deleted
            response: response from CLC API call
        """
        changed = False
        response = []
        firewall_policy_id = firewall_dict.get('firewall_policy_id')
        result = self._get_firewall_policy(
            source_account_alias, location, firewall_policy_id)
        if result:
            if not self.module.check_mode:
                response = self._delete_firewall_policy(
                    source_account_alias,
                    location,
                    firewall_policy_id)
            changed = True
        return changed, firewall_policy_id, response

    def _create_firewall_policy(
            self,
            source_account_alias,
            location,
            firewall_dict):
        """
        Creates the firewall policy for the given account alias
        :param source_account_alias: the source account alias for the firewall policy
        :param location: datacenter of the firewall policy
        :param firewall_dict: dictionary of request parameters for firewall policy
        :return: response from CLC API call
        """
        payload = {
            'destinationAccount': firewall_dict.get('destination_account_alias'),
            'source': firewall_dict.get('source'),
            'destination': firewall_dict.get('destination'),
            'ports': firewall_dict.get('ports')}
        try:
            response = self.clc.v2.API.Call(
                'POST', '/v2-experimental/firewallPolicies/%s/%s' %
                (source_account_alias, location), payload)
        except APIFailedResponse as e:
            return self.module.fail_json(
                msg="Unable to create firewall policy. %s" %
                str(e.response_text))
        return response

    def _delete_firewall_policy(
            self,
            source_account_alias,
            location,
            firewall_policy_id):
        """
        Deletes a given firewall policy for an account alias in a datacenter
        :param source_account_alias: the source account alias for the firewall policy
        :param location: datacenter of the firewall policy
        :param firewall_policy_id: firewall policy id to delete
        :return: response: response from CLC API call
        """
        try:
            response = self.clc.v2.API.Call(
                'DELETE', '/v2-experimental/firewallPolicies/%s/%s/%s' %
                (source_account_alias, location, firewall_policy_id))
        except APIFailedResponse as e:
            return self.module.fail_json(
                msg="Unable to delete the firewall policy id : {0}. {1}".format(
                    firewall_policy_id, str(e.response_text)))
        return response

    def _update_firewall_policy(
            self,
            source_account_alias,
            location,
            firewall_policy_id,
            firewall_dict):
        """
        Updates a firewall policy for a given datacenter and account alias
        :param source_account_alias: the source account alias for the firewall policy
        :param location: datacenter of the firewall policy
        :param firewall_policy_id: firewall policy id to update
        :param firewall_dict: dictionary of request parameters for firewall policy
        :return: response: response from CLC API call
        """
        # NOTE(review): the whole firewall_dict (including module-only keys
        # like 'state' and 'wait') is sent as the PUT body; presumably the
        # API ignores unknown fields -- confirm before tightening this.
        try:
            response = self.clc.v2.API.Call(
                'PUT',
                '/v2-experimental/firewallPolicies/%s/%s/%s' %
                (source_account_alias,
                 location,
                 firewall_policy_id),
                firewall_dict)
        except APIFailedResponse as e:
            return self.module.fail_json(
                msg="Unable to update the firewall policy id : {0}. {1}".format(
                    firewall_policy_id, str(e.response_text)))
        return response

    @staticmethod
    def _compare_get_request_with_dict(response, firewall_dict):
        """
        Helper method to compare the json response for getting the firewall policy with the request parameters
        :param response: response from the get method
        :param firewall_dict: dictionary of request parameters for firewall policy
        :return: changed: Boolean that returns true if there are differences between
                 the response parameters and the playbook parameters
        """
        changed = False

        response_dest_account_alias = response.get('destinationAccount')
        response_enabled = response.get('enabled')
        response_source = response.get('source')
        response_dest = response.get('destination')
        response_ports = response.get('ports')
        request_dest_account_alias = firewall_dict.get(
            'destination_account_alias')
        request_enabled = firewall_dict.get('enabled')
        # Kept for callers that omit 'enabled' entirely; with the corrected
        # argument spec the module itself always supplies True by default.
        if request_enabled is None:
            request_enabled = True
        request_source = firewall_dict.get('source')
        request_dest = firewall_dict.get('destination')
        request_ports = firewall_dict.get('ports')

        # Any populated response field that disagrees with the request marks
        # the policy as changed; unset request fields are left uncompared.
        if (
            response_dest_account_alias and str(response_dest_account_alias) != str(request_dest_account_alias)) or (
                response_enabled != request_enabled) or (
                response_source and response_source != request_source) or (
                response_dest and response_dest != request_dest) or (
                response_ports and response_ports != request_ports):
            changed = True
        return changed

    def _get_firewall_policy(
            self,
            source_account_alias,
            location,
            firewall_policy_id):
        """
        Get back details for a particular firewall policy
        :param source_account_alias: the source account alias for the firewall policy
        :param location: datacenter of the firewall policy
        :param firewall_policy_id: id of the firewall policy to get
        :return: response - The response from CLC API call, or None when the
                 policy does not exist (HTTP 404)
        """
        response = None
        try:
            response = self.clc.v2.API.Call(
                'GET', '/v2-experimental/firewallPolicies/%s/%s/%s' %
                (source_account_alias, location, firewall_policy_id))
        except APIFailedResponse as e:
            # 404 means "absent", which callers treat as a normal outcome.
            if e.response_status_code != 404:
                self.module.fail_json(
                    msg="Unable to fetch the firewall policy with id : {0}. {1}".format(
                        firewall_policy_id, str(e.response_text)))
        return response

    def _wait_for_requests_to_complete(
            self,
            source_account_alias,
            location,
            firewall_policy_id,
            wait_limit=50):
        """
        Waits until the CLC requests are complete if the wait argument is True
        :param source_account_alias: The source account alias for the firewall policy
        :param location: datacenter of the firewall policy
        :param firewall_policy_id: The firewall policy id
        :param wait_limit: The number of times to check the status for completion
        :return: the firewall_policy object
        """
        wait = self.module.params.get('wait')
        count = 0
        firewall_policy = None
        while wait:
            count += 1
            firewall_policy = self._get_firewall_policy(
                source_account_alias, location, firewall_policy_id)
            if firewall_policy is None:
                # Policy disappeared (404) while polling; fail explicitly
                # instead of crashing on None.get() below.
                return self.module.fail_json(
                    msg='Unable to fetch the firewall policy with id : {0}'.format(
                        firewall_policy_id))
            status = firewall_policy.get('status')
            if status == 'active' or count > wait_limit:
                wait = False
            else:
                # wait for 2 seconds before polling again
                sleep(2)
        return firewall_policy

    @staticmethod
    def _set_user_agent(clc):
        """
        Tag outgoing sdk requests with this module's identity.
        :param clc: the clc-sdk module object
        """
        if hasattr(clc, 'SetRequestsSession'):
            agent_string = "ClcAnsibleModule/" + __version__
            ses = requests.Session()
            ses.headers.update({"Api-Client": agent_string})
            ses.headers['User-Agent'] += " " + agent_string
            clc.SetRequestsSession(ses)
+
+
def main():
    """
    The main function. Instantiates the module and calls process_request.

    Check mode is supported; process_request exits the module itself via
    exit_json/fail_json.
    :return: none
    """
    module = AnsibleModule(
        argument_spec=ClcFirewallPolicy._define_module_argument_spec(),
        supports_check_mode=True)

    clc_firewall = ClcFirewallPolicy(module)
    clc_firewall.process_request()
+
+from ansible.module_utils.basic import * # pylint: disable=W0614
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/centurylink/clc_group.py b/lib/ansible/modules/cloud/centurylink/clc_group.py
new file mode 100644
index 0000000000..4c522b7b0b
--- /dev/null
+++ b/lib/ansible/modules/cloud/centurylink/clc_group.py
@@ -0,0 +1,521 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: clc_group
+short_description: Create/delete Server Groups at Centurylink Cloud
+description:
+    - Create or delete Server Groups at CenturyLink Cloud
+version_added: "2.0"
+options:
+ name:
+ description:
+ - The name of the Server Group
+ required: True
+ description:
+ description:
+ - A description of the Server Group
+ required: False
+ parent:
+ description:
+ - The parent group of the server group. If parent is not provided, it creates the group at top level.
+ required: False
+ location:
+ description:
+ - Datacenter to create the group in. If location is not provided, the group gets created in the default datacenter
+ associated with the account
+ required: False
+ state:
+ description:
+ - Whether to create or delete the group
+ default: present
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ choices: [ True, False ]
+ default: True
+ required: False
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+
+# Create a Server Group
+
+---
+- name: Create Server Group
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create / Verify a Server Group at CenturyLink Cloud
+ clc_group:
+ name: My Cool Server Group
+ parent: Default Group
+ state: present
+ register: clc
+
+ - name: debug
+ debug:
+ var: clc
+
+# Delete a Server Group
+
+---
+- name: Delete Server Group
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Delete / Verify Absent a Server Group at CenturyLink Cloud
+ clc_group:
+ name: My Cool Server Group
+ parent: Default Group
+ state: absent
+ register: clc
+
+ - name: debug
+ debug:
+ var: clc
+'''
+
+RETURN = '''
+group:
+ description: The group information
+ returned: success
+ type: dict
+ sample:
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":"2015-07-29T18:52:47Z",
+ "modifiedBy":"service.wfad",
+ "modifiedDate":"2015-07-29T18:52:47Z"
+ },
+ "customFields":[
+
+ ],
+ "description":"test group",
+ "groups":[
+
+ ],
+ "id":"bb5f12a3c6044ae4ad0a03e73ae12cd1",
+ "links":[
+ {
+ "href":"/v2/groups/wfad",
+ "rel":"createGroup",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad",
+ "rel":"createServer",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"parentGroup"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/defaults",
+ "rel":"defaults",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/billing",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/archive",
+ "rel":"archiveGroupAction"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/horizontalAutoscalePolicy",
+ "rel":"horizontalAutoscalePolicyMapping",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/bb5f12a3c6044ae4ad0a03e73ae12cd1/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test group",
+ "status":"active",
+ "type":"default"
+ }
+'''
+
+__version__ = '${version}'
+
+import os
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule
+
+
class ClcGroup(object):
    """
    Ansible helper that creates and deletes server groups in CenturyLink
    Cloud, walking the datacenter's group tree to resolve names.
    """

    # Class-level handles, rebound per instance / per datacenter walk.
    clc = None
    root_group = None

    def __init__(self, module):
        """
        Construct module
        :param module: the AnsibleModule driving this request
        """
        self.clc = clc_sdk
        self.module = module
        self.group_dict = {}

        # Hard requirements: clc-sdk and a sufficiently recent requests.
        if not CLC_FOUND:
            self.module.fail_json(
                msg='clc-python-sdk required for this module')
        if not REQUESTS_FOUND:
            self.module.fail_json(
                msg='requests library is required for this module')
        if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library version should be >= 2.5.0')

        self._set_user_agent(self.clc)

    def process_request(self):
        """
        Execute the main code path, and handle the request
        :return: none (exits the module via exit_json)
        """
        location = self.module.params.get('location')
        group_name = self.module.params.get('name')
        parent_name = self.module.params.get('parent')
        group_description = self.module.params.get('description')
        state = self.module.params.get('state')

        self._set_clc_credentials_from_env()
        self.group_dict = self._get_group_tree_for_datacenter(
            datacenter=location)

        if state == "absent":
            # Bug fix: the local holding the CLC request objects was named
            # 'requests', shadowing the imported requests library.
            changed, group, request_list = self._ensure_group_is_absent(
                group_name=group_name, parent_name=parent_name)
            if request_list:
                self._wait_for_requests_to_complete(request_list)
        else:
            changed, group = self._ensure_group_is_present(
                group_name=group_name, parent_name=parent_name,
                group_description=group_description)
        # A real sdk Group exposes .data; in check mode (or when nothing was
        # created) we only have the name string.
        try:
            group = group.data
        except AttributeError:
            group = group_name
        self.module.exit_json(changed=changed, group=group)

    @staticmethod
    def _define_module_argument_spec():
        """
        Define the argument spec for the ansible module
        :return: argument spec dictionary
        """
        argument_spec = dict(
            name=dict(required=True),
            description=dict(default=None),
            parent=dict(default=None),
            location=dict(default=None),
            state=dict(default='present', choices=['present', 'absent']),
            wait=dict(type='bool', default=True))

        return argument_spec

    def _set_clc_credentials_from_env(self):
        """
        Set the CLC Credentials on the sdk by reading environment variables.

        A token/alias pair (CLC_V2_API_TOKEN + CLC_ACCT_ALIAS) takes
        precedence over username/password; with neither pair the module fails.
        :return: none
        """
        env = os.environ
        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
        clc_alias = env.get('CLC_ACCT_ALIAS', False)
        api_url = env.get('CLC_V2_API_URL', False)

        # Optional endpoint override for non-production CLC environments.
        if api_url:
            self.clc.defaults.ENDPOINT_URL_V2 = api_url

        if v2_api_token and clc_alias:
            # Token-based auth pokes sdk-internal attributes directly.
            self.clc._LOGIN_TOKEN_V2 = v2_api_token
            self.clc._V2_ENABLED = True
            self.clc.ALIAS = clc_alias
        elif v2_api_username and v2_api_passwd:
            self.clc.v2.SetCredentials(
                api_username=v2_api_username,
                api_passwd=v2_api_passwd)
        else:
            return self.module.fail_json(
                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                    "environment variables")

    def _ensure_group_is_absent(self, group_name, parent_name):
        """
        Ensure that group_name is absent by deleting it if necessary
        :param group_name: string - the name of the clc server group to delete
        :param parent_name: string - the name of the parent group for group_name
        :return: (changed, group, results)
            changed: whether a deletion was performed (or would be, in check mode)
            group: list of the group names that were deleted
            results: list of CLC request objects from the delete calls
        """
        changed = False
        group = []
        results = []

        if self._group_exists(group_name=group_name, parent_name=parent_name):
            if not self.module.check_mode:
                group.append(group_name)
                result = self._delete_group(group_name)
                results.append(result)
            changed = True
        return changed, group, results

    def _delete_group(self, group_name):
        """
        Delete the provided server group
        :param group_name: string - the server group to delete
        :return: the CLC request object from the delete call
        """
        response = None
        # Callers verify existence via _group_exists first, so the lookup is
        # expected to succeed here.
        group, parent = self.group_dict.get(group_name)
        try:
            response = group.Delete()
        except CLCException as ex:
            self.module.fail_json(msg='Failed to delete group :{0}. {1}'.format(
                group_name, ex.response_text
            ))
        return response

    def _ensure_group_is_present(
            self,
            group_name,
            parent_name,
            group_description):
        """
        Checks to see if a server group exists, creates it if it doesn't.
        :param group_name: the name of the group to validate/create
        :param parent_name: the name of the parent group for group_name
        :param group_description: a short description of the server group (used when creating)
        :return: (changed, group) -
            changed: Boolean- whether a change was made,
            group: A clc group object for the group
        """
        assert self.root_group, "Implementation Error: Root Group not set"
        # Default to the datacenter's root group when no parent is given.
        parent = parent_name if parent_name is not None else self.root_group.name
        description = group_description
        changed = False
        group = group_name

        parent_exists = self._group_exists(group_name=parent, parent_name=None)
        child_exists = self._group_exists(
            group_name=group_name,
            parent_name=parent)

        if parent_exists and child_exists:
            group, parent = self.group_dict[group_name]
            changed = False
        elif parent_exists and not child_exists:
            if not self.module.check_mode:
                group = self._create_group(
                    group=group,
                    parent=parent,
                    description=description)
            changed = True
        else:
            self.module.fail_json(
                msg="parent group: " +
                parent +
                " does not exist")

        return changed, group

    def _create_group(self, group, parent, description):
        """
        Create the provided server group
        :param group: string - the name of the group to create
        :param parent: string - the name of the parent group for {group}
        :param description: string - a text description of the group
        :return: clc_sdk.Group - the created group
        """
        response = None
        (parent, grandparent) = self.group_dict[parent]
        try:
            response = parent.Create(name=group, description=description)
        except CLCException as ex:
            self.module.fail_json(msg='Failed to create group :{0}. {1}'.format(
                group, ex.response_text))
        return response

    def _group_exists(self, group_name, parent_name):
        """
        Check to see if a group exists
        :param group_name: string - the group to check
        :param parent_name: string - the parent of group_name; None matches any parent
        :return: boolean - whether the group exists
        """
        result = False
        if group_name in self.group_dict:
            (group, parent) = self.group_dict[group_name]
            if parent_name is None or parent_name == parent.name:
                result = True
        return result

    def _get_group_tree_for_datacenter(self, datacenter=None):
        """
        Walk the tree of groups for a datacenter
        :param datacenter: string - the datacenter to walk (ex: 'UC1')
        :return: a dictionary mapping group name -> (group, parent group)
        """
        self.root_group = self.clc.v2.Datacenter(
            location=datacenter).RootGroup()
        return self._walk_groups_recursive(
            parent_group=None,
            child_group=self.root_group)

    def _walk_groups_recursive(self, parent_group, child_group):
        """
        Walk a parent-child tree of groups, starting with the provided child group
        :param parent_group: clc_sdk.Group - the parent of child_group (None for the root)
        :param child_group: clc_sdk.Group - the group to record and descend into
        :return: a dictionary mapping group name -> (group, parent group)
        """
        result = {str(child_group): (child_group, parent_group)}
        groups = child_group.Subgroups().groups
        if len(groups) > 0:
            for group in groups:
                # Skip system/archive groups; only 'default' groups are
                # user-manageable.
                if group.type != 'default':
                    continue

                result.update(self._walk_groups_recursive(child_group, group))
        return result

    def _wait_for_requests_to_complete(self, requests_lst):
        """
        Waits until the CLC requests are complete if the wait argument is True
        :param requests_lst: The list of CLC request objects
        :return: none
        """
        if not self.module.params['wait']:
            return
        for request in requests_lst:
            request.WaitUntilComplete()
            for request_details in request.requests:
                if request_details.Status() != 'succeeded':
                    self.module.fail_json(
                        msg='Unable to process group request')

    @staticmethod
    def _set_user_agent(clc):
        """
        Tag outgoing sdk requests with this module's identity.
        :param clc: the clc-sdk module object
        """
        if hasattr(clc, 'SetRequestsSession'):
            agent_string = "ClcAnsibleModule/" + __version__
            ses = requests.Session()
            ses.headers.update({"Api-Client": agent_string})
            ses.headers['User-Agent'] += " " + agent_string
            clc.SetRequestsSession(ses)
+
+
def main():
    """
    The main function. Instantiates the module and calls process_request.

    Check mode is supported; process_request exits the module itself via
    exit_json/fail_json.
    :return: none
    """
    module = AnsibleModule(
        argument_spec=ClcGroup._define_module_argument_spec(),
        supports_check_mode=True)

    clc_group = ClcGroup(module)
    clc_group.process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/centurylink/clc_loadbalancer.py b/lib/ansible/modules/cloud/centurylink/clc_loadbalancer.py
new file mode 100644
index 0000000000..e159953ba3
--- /dev/null
+++ b/lib/ansible/modules/cloud/centurylink/clc_loadbalancer.py
@@ -0,0 +1,945 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: clc_loadbalancer
+short_description: Create, Delete shared loadbalancers in CenturyLink Cloud.
+description:
+ - An Ansible module to Create, Delete shared loadbalancers in CenturyLink Cloud.
+version_added: "2.0"
+options:
+ name:
+ description:
+ - The name of the loadbalancer
+ required: True
+ description:
+ description:
+ - A description for the loadbalancer
+ required: False
+ default: None
+ alias:
+ description:
+ - The alias of your CLC Account
+ required: True
+ location:
+ description:
+ - The location of the datacenter where the load balancer resides in
+ required: True
+ method:
+ description:
+      - The balancing method for the load balancer pool
+ required: False
+ default: None
+ choices: ['leastConnection', 'roundRobin']
+ persistence:
+ description:
+ - The persistence method for the load balancer
+ required: False
+ default: None
+ choices: ['standard', 'sticky']
+ port:
+ description:
+ - Port to configure on the public-facing side of the load balancer pool
+ required: False
+ default: None
+ choices: [80, 443]
+ nodes:
+ description:
+ - A list of nodes that needs to be added to the load balancer pool
+ required: False
+ default: []
+ status:
+ description:
+ - The status of the loadbalancer
+ required: False
+ default: enabled
+ choices: ['enabled', 'disabled']
+ state:
+ description:
+ - Whether to create or delete the load balancer pool
+ required: False
+ default: present
+ choices: ['present', 'absent', 'port_absent', 'nodes_present', 'nodes_absent']
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
+- name: Create Loadbalancer
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: present
+
+- name: Add node to an existing loadbalancer pool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.234
+ privatePort: 80
+ state: nodes_present
+
+- name: Remove node from an existing loadbalancer pool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Create things
+ clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.234
+ privatePort: 80
+ state: nodes_absent
+
+- name: Delete LoadbalancerPool
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Delete things
+ clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: port_absent
+
+- name: Delete Loadbalancer
+ hosts: localhost
+ connection: local
+ tasks:
+ - name: Actually Delete things
+ clc_loadbalancer:
+ name: test
+ description: test
+ alias: TEST
+ location: WA1
+ port: 443
+ nodes:
+ - ipAddress: 10.11.22.123
+ privatePort: 80
+ state: absent
+'''
+
+RETURN = '''
+loadbalancer:
+ description: The load balancer result object from CLC
+ returned: success
+ type: dict
+ sample:
+ {
+ "description":"test-lb",
+ "id":"ab5b18cb81e94ab9925b61d1ca043fb5",
+ "ipAddress":"66.150.174.197",
+ "links":[
+ {
+ "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/sharedLoadBalancers/wfad/wa1/ab5b18cb81e94ab9925b61d1ca043fb5/pools",
+ "rel":"pools",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ }
+ ],
+ "name":"test-lb",
+ "pools":[
+
+ ],
+ "status":"enabled"
+ }
+'''
+
+__version__ = '${version}'
+
+from time import sleep
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
class ClcLoadBalancer:
    """Orchestrates create/delete/update operations on CLC shared load balancers."""

    # Handle to the clc-sdk module; re-bound per instance in __init__.
    clc = None

    def __init__(self, module):
        """
        Construct the handler and validate runtime dependencies.
        :param module: the AnsibleModule driving this run
        """
        self.clc = clc_sdk
        self.module = module
        # Cache of the datacenter's load balancer listing, populated by
        # process_request and read by the _exists/_get_id helpers.
        self.lb_dict = {}

        # Fail fast when a dependency is missing or too old; the *_FOUND
        # flags are computed by the import guards at the top of this file.
        if not CLC_FOUND:
            self.module.fail_json(
                msg='clc-python-sdk required for this module')
        if not REQUESTS_FOUND:
            self.module.fail_json(
                msg='requests library is required for this module')
        if requests.__version__ and LooseVersion(
                requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library version should be >= 2.5.0')

        self._set_user_agent(self.clc)
+
    def process_request(self):
        """
        Execute the main code path: dispatch on the 'state' parameter and
        report the outcome through module.exit_json.
        :return: none
        """
        changed = False
        result_lb = None
        # Pull all module parameters up front.
        loadbalancer_name = self.module.params.get('name')
        loadbalancer_alias = self.module.params.get('alias')
        loadbalancer_location = self.module.params.get('location')
        loadbalancer_description = self.module.params.get('description')
        loadbalancer_port = self.module.params.get('port')
        loadbalancer_method = self.module.params.get('method')
        loadbalancer_persistence = self.module.params.get('persistence')
        loadbalancer_nodes = self.module.params.get('nodes')
        loadbalancer_status = self.module.params.get('status')
        state = self.module.params.get('state')

        # The description defaults to the balancer's name when omitted.
        if loadbalancer_description is None:
            loadbalancer_description = loadbalancer_name

        self._set_clc_credentials_from_env()

        # Cache the datacenter's balancer listing; the _exists/_get_id
        # helpers read self.lb_dict rather than re-querying the API.
        self.lb_dict = self._get_loadbalancer_list(
            alias=loadbalancer_alias,
            location=loadbalancer_location)

        if state == 'present':
            changed, result_lb, lb_id = self.ensure_loadbalancer_present(
                name=loadbalancer_name,
                alias=loadbalancer_alias,
                location=loadbalancer_location,
                description=loadbalancer_description,
                status=loadbalancer_status)
            if loadbalancer_port:
                # NOTE(review): 'changed' is overwritten by each subsequent
                # step, so a freshly created balancer whose pool/nodes need
                # no further change may report changed=False — confirm this
                # is intended.
                changed, result_pool, pool_id = self.ensure_loadbalancerpool_present(
                    lb_id=lb_id,
                    alias=loadbalancer_alias,
                    location=loadbalancer_location,
                    method=loadbalancer_method,
                    persistence=loadbalancer_persistence,
                    port=loadbalancer_port)

            if loadbalancer_nodes:
                changed, result_nodes = self.ensure_lbpool_nodes_set(
                    alias=loadbalancer_alias,
                    location=loadbalancer_location,
                    name=loadbalancer_name,
                    port=loadbalancer_port,
                    nodes=loadbalancer_nodes)
        elif state == 'absent':
            changed, result_lb = self.ensure_loadbalancer_absent(
                name=loadbalancer_name,
                alias=loadbalancer_alias,
                location=loadbalancer_location)

        elif state == 'port_absent':
            changed, result_lb = self.ensure_loadbalancerpool_absent(
                alias=loadbalancer_alias,
                location=loadbalancer_location,
                name=loadbalancer_name,
                port=loadbalancer_port)

        elif state == 'nodes_present':
            changed, result_lb = self.ensure_lbpool_nodes_present(
                alias=loadbalancer_alias,
                location=loadbalancer_location,
                name=loadbalancer_name,
                port=loadbalancer_port,
                nodes=loadbalancer_nodes)

        elif state == 'nodes_absent':
            changed, result_lb = self.ensure_lbpool_nodes_absent(
                alias=loadbalancer_alias,
                location=loadbalancer_location,
                name=loadbalancer_name,
                port=loadbalancer_port,
                nodes=loadbalancer_nodes)

        self.module.exit_json(changed=changed, loadbalancer=result_lb)
+
+ def ensure_loadbalancer_present(
+ self, name, alias, location, description, status):
+ """
+ Checks to see if a load balancer exists and creates one if it does not.
+ :param name: Name of loadbalancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :param description: Description of loadbalancer
+ :param status: Enabled / Disabled
+ :return: (changed, result, lb_id)
+ changed: Boolean whether a change was made
+ result: The result object from the CLC load balancer request
+ lb_id: The load balancer id
+ """
+ changed = False
+ result = name
+ lb_id = self._loadbalancer_exists(name=name)
+ if not lb_id:
+ if not self.module.check_mode:
+ result = self.create_loadbalancer(name=name,
+ alias=alias,
+ location=location,
+ description=description,
+ status=status)
+ lb_id = result.get('id')
+ changed = True
+
+ return changed, result, lb_id
+
+ def ensure_loadbalancerpool_present(
+ self, lb_id, alias, location, method, persistence, port):
+ """
+ Checks to see if a load balancer pool exists and creates one if it does not.
+ :param lb_id: The loadbalancer id
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param method: the load balancing method
+ :param persistence: the load balancing persistence type
+ :param port: the port that the load balancer will listen on
+ :return: (changed, group, pool_id) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ pool_id: The string id of the load balancer pool
+ """
+ changed = False
+ result = port
+ if not lb_id:
+ return changed, None, None
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if not pool_id:
+ if not self.module.check_mode:
+ result = self.create_loadbalancerpool(
+ alias=alias,
+ location=location,
+ lb_id=lb_id,
+ method=method,
+ persistence=persistence,
+ port=port)
+ pool_id = result.get('id')
+ changed = True
+
+ return changed, result, pool_id
+
+ def ensure_loadbalancer_absent(self, name, alias, location):
+ """
+ Checks to see if a load balancer exists and deletes it if it does
+ :param name: Name of the load balancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :return: (changed, result)
+ changed: Boolean whether a change was made
+ result: The result from the CLC API Call
+ """
+ changed = False
+ result = name
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ if not self.module.check_mode:
+ result = self.delete_loadbalancer(alias=alias,
+ location=location,
+ name=name)
+ changed = True
+ return changed, result
+
+ def ensure_loadbalancerpool_absent(self, alias, location, name, port):
+ """
+ Checks to see if a load balancer pool exists and deletes it if it does
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer listens on
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = None
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed = True
+ if not self.module.check_mode:
+ result = self.delete_loadbalancerpool(
+ alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "LB Doesn't Exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_set(self, alias, location, name, port, nodes):
+ """
+ Checks to see if the provided list of nodes exist for the pool
+ and set the nodes if any in the list those doesn't exist
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: The list of nodes to be updated to the pool
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ result = {}
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ nodes_exist = self._loadbalancerpool_nodes_exists(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_check=nodes)
+ if not nodes_exist:
+ changed = True
+ result = self.set_loadbalancernodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "Load balancer doesn't Exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_present(self, alias, location, name, port, nodes):
+ """
+ Checks to see if the provided list of nodes exist for the pool and add the missing nodes to the pool
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: the list of nodes to be added
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed, result = self.add_lbpool_nodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_add=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "Load balancer doesn't Exist"
+ return changed, result
+
+ def ensure_lbpool_nodes_absent(self, alias, location, name, port, nodes):
+ """
+ Checks to see if the provided list of nodes exist for the pool and removes them if found any
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param name: the name of the load balancer
+ :param port: the port that the load balancer will listen on
+ :param nodes: the list of nodes to be removed
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ lb_exists = self._loadbalancer_exists(name=name)
+ if lb_exists:
+ lb_id = self._get_loadbalancer_id(name=name)
+ pool_id = self._loadbalancerpool_exists(
+ alias=alias,
+ location=location,
+ port=port,
+ lb_id=lb_id)
+ if pool_id:
+ changed, result = self.remove_lbpool_nodes(alias=alias,
+ location=location,
+ lb_id=lb_id,
+ pool_id=pool_id,
+ nodes_to_remove=nodes)
+ else:
+ result = "Pool doesn't exist"
+ else:
+ result = "Load balancer doesn't Exist"
+ return changed, result
+
+ def create_loadbalancer(self, name, alias, location, description, status):
+ """
+ Create a loadbalancer w/ params
+ :param name: Name of loadbalancer
+ :param alias: Alias of account
+ :param location: Datacenter
+ :param description: Description for loadbalancer to be created
+ :param status: Enabled / Disabled
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call('POST',
+ '/v2/sharedLoadBalancers/%s/%s' % (alias,
+ location),
+ json.dumps({"name": name,
+ "description": description,
+ "status": status}))
+ sleep(1)
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to create load balancer "{0}". {1}'.format(
+ name, str(e.response_text)))
+ return result
+
+ def create_loadbalancerpool(
+ self, alias, location, lb_id, method, persistence, port):
+ """
+ Creates a pool on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param method: the load balancing method
+ :param persistence: the load balancing persistence type
+ :param port: the port that the load balancer will listen on
+ :return: result: The result from the create API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'POST', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
+ (alias, location, lb_id), json.dumps(
+ {
+ "port": port, "method": method, "persistence": persistence
+ }))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to create pool for load balancer id "{0}". {1}'.format(
+ lb_id, str(e.response_text)))
+ return result
+
+ def delete_loadbalancer(self, alias, location, name):
+ """
+ Delete CLC loadbalancer
+ :param alias: Alias for account
+ :param location: Datacenter
+ :param name: Name of the loadbalancer to delete
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ lb_id = self._get_loadbalancer_id(name=name)
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s' %
+ (alias, location, lb_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to delete load balancer "{0}". {1}'.format(
+ name, str(e.response_text)))
+ return result
+
+ def delete_loadbalancerpool(self, alias, location, lb_id, pool_id):
+ """
+ Delete the pool on the provided load balancer
+ :param alias: The account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the load balancer pool
+ :return: result: The result from the delete API call
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'DELETE', '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s' %
+ (alias, location, lb_id, pool_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to delete pool for load balancer id "{0}". {1}'.format(
+ lb_id, str(e.response_text)))
+ return result
+
+ def _get_loadbalancer_id(self, name):
+ """
+ Retrieves unique ID of loadbalancer
+ :param name: Name of loadbalancer
+ :return: Unique ID of the loadbalancer
+ """
+ id = None
+ for lb in self.lb_dict:
+ if lb.get('name') == name:
+ id = lb.get('id')
+ return id
+
+ def _get_loadbalancer_list(self, alias, location):
+ """
+ Retrieve a list of loadbalancers
+ :param alias: Alias for account
+ :param location: Datacenter
+ :return: JSON data for all loadbalancers at datacenter
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call(
+ 'GET', '/v2/sharedLoadBalancers/%s/%s' % (alias, location))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to fetch load balancers for account: {0}. {1}'.format(
+ alias, str(e.response_text)))
+ return result
+
+ def _loadbalancer_exists(self, name):
+ """
+ Verify a loadbalancer exists
+ :param name: Name of loadbalancer
+ :return: False or the ID of the existing loadbalancer
+ """
+ result = False
+
+ for lb in self.lb_dict:
+ if lb.get('name') == name:
+ result = lb.get('id')
+ return result
+
+ def _loadbalancerpool_exists(self, alias, location, port, lb_id):
+ """
+ Checks to see if a pool exists on the specified port on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param port: the port to check and see if it exists
+ :param lb_id: the id string of the provided load balancer
+ :return: result: The id string of the pool or False
+ """
+ result = False
+ try:
+ pool_list = self.clc.v2.API.Call(
+ 'GET', '/v2/sharedLoadBalancers/%s/%s/%s/pools' %
+ (alias, location, lb_id))
+ except APIFailedResponse as e:
+ return self.module.fail_json(
+ msg='Unable to fetch the load balancer pools for for load balancer id: {0}. {1}'.format(
+ lb_id, str(e.response_text)))
+ for pool in pool_list:
+ if int(pool.get('port')) == int(port):
+ result = pool.get('id')
+ return result
+
+ def _loadbalancerpool_nodes_exists(
+ self, alias, location, lb_id, pool_id, nodes_to_check):
+ """
+ Checks to see if a set of nodes exists on the specified port on the provided load balancer
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the provided load balancer
+ :param pool_id: the id string of the load balancer pool
+ :param nodes_to_check: the list of nodes to check for
+ :return: result: True / False indicating if the given nodes exist
+ """
+ result = False
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_check:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node in nodes:
+ result = True
+ else:
+ result = False
+ return result
+
+ def set_loadbalancernodes(self, alias, location, lb_id, pool_id, nodes):
+ """
+ Updates nodes to the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes: a list of dictionaries containing the nodes to set
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not lb_id:
+ return result
+ if not self.module.check_mode:
+ try:
+ result = self.clc.v2.API.Call('PUT',
+ '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
+ % (alias, location, lb_id, pool_id), json.dumps(nodes))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to set nodes for the load balancer pool id "{0}". {1}'.format(
+ pool_id, str(e.response_text)))
+ return result
+
+ def add_lbpool_nodes(self, alias, location, lb_id, pool_id, nodes_to_add):
+ """
+ Add nodes to the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes_to_add: a list of dictionaries containing the nodes to add
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = {}
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_add:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if not node in nodes:
+ changed = True
+ nodes.append(node)
+ if changed == True and not self.module.check_mode:
+ result = self.set_loadbalancernodes(
+ alias,
+ location,
+ lb_id,
+ pool_id,
+ nodes)
+ return changed, result
+
+ def remove_lbpool_nodes(
+ self, alias, location, lb_id, pool_id, nodes_to_remove):
+ """
+ Removes nodes from the provided pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :param nodes_to_remove: a list of dictionaries containing the nodes to remove
+ :return: (changed, result) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ result = {}
+ nodes = self._get_lbpool_nodes(alias, location, lb_id, pool_id)
+ for node in nodes_to_remove:
+ if not node.get('status'):
+ node['status'] = 'enabled'
+ if node in nodes:
+ changed = True
+ nodes.remove(node)
+ if changed == True and not self.module.check_mode:
+ result = self.set_loadbalancernodes(
+ alias,
+ location,
+ lb_id,
+ pool_id,
+ nodes)
+ return changed, result
+
+ def _get_lbpool_nodes(self, alias, location, lb_id, pool_id):
+ """
+ Return the list of nodes available to the provided load balancer pool
+ :param alias: the account alias
+ :param location: the datacenter the load balancer resides in
+ :param lb_id: the id string of the load balancer
+ :param pool_id: the id string of the pool
+ :return: result: The list of nodes
+ """
+ result = None
+ try:
+ result = self.clc.v2.API.Call('GET',
+ '/v2/sharedLoadBalancers/%s/%s/%s/pools/%s/nodes'
+ % (alias, location, lb_id, pool_id))
+ except APIFailedResponse as e:
+ self.module.fail_json(
+ msg='Unable to fetch list of available nodes for load balancer pool id: {0}. {1}'.format(
+ pool_id, str(e.response_text)))
+ return result
+
+ @staticmethod
+ def define_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(required=True),
+ description=dict(default=None),
+ location=dict(required=True),
+ alias=dict(required=True),
+ port=dict(choices=[80, 443]),
+ method=dict(choices=['leastConnection', 'roundRobin']),
+ persistence=dict(choices=['standard', 'sticky']),
+ nodes=dict(type='list', default=[]),
+ status=dict(default='enabled', choices=['enabled', 'disabled']),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'port_absent',
+ 'nodes_present',
+ 'nodes_absent'])
+ )
+ return argument_spec
+
    def _set_clc_credentials_from_env(self):
        """
        Set the CLC credentials on the sdk by reading environment variables.

        Preference order: an explicit API token + account alias wins over a
        username/password pair; with neither, the module fails.
        :return: none
        """
        env = os.environ
        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
        clc_alias = env.get('CLC_ACCT_ALIAS', False)
        api_url = env.get('CLC_V2_API_URL', False)

        # Optional override for pointing at a non-production CLC endpoint.
        if api_url:
            self.clc.defaults.ENDPOINT_URL_V2 = api_url

        if v2_api_token and clc_alias:
            # Inject the pre-generated token directly into the SDK's
            # private state, bypassing the login call.
            self.clc._LOGIN_TOKEN_V2 = v2_api_token
            self.clc._V2_ENABLED = True
            self.clc.ALIAS = clc_alias
        elif v2_api_username and v2_api_passwd:
            self.clc.v2.SetCredentials(
                api_username=v2_api_username,
                api_passwd=v2_api_passwd)
        else:
            return self.module.fail_json(
                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                "environment variables")
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
def main():
    """
    Module entry point: build the AnsibleModule from the shared argument
    spec and hand control to ClcLoadBalancer.
    :return: None
    """
    ansible_module = AnsibleModule(
        argument_spec=ClcLoadBalancer.define_argument_spec(),
        supports_check_mode=True)
    ClcLoadBalancer(ansible_module).process_request()


# Ansible 1.x-style boilerplate: module_utils is star-imported at the
# bottom of the file, before the entry-point guard runs.
from ansible.module_utils.basic import *  # pylint: disable=W0614
if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/centurylink/clc_modify_server.py b/lib/ansible/modules/cloud/centurylink/clc_modify_server.py
new file mode 100644
index 0000000000..d65073dacc
--- /dev/null
+++ b/lib/ansible/modules/cloud/centurylink/clc_modify_server.py
@@ -0,0 +1,981 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: clc_modify_server
+short_description: modify servers in CenturyLink Cloud.
+description:
+ - An Ansible module to modify servers in CenturyLink Cloud.
+version_added: "2.0"
+options:
+ server_ids:
+ description:
+ - A list of server Ids to modify.
+ required: True
+ cpu:
+ description:
+ - How many CPUs to update on the server
+ required: False
+ default: None
+ memory:
+ description:
+ - Memory (in GB) to set to the server.
+ required: False
+ default: None
+ anti_affinity_policy_id:
+ description:
+ - The anti affinity policy id to be set for a hyper scale server.
+ This is mutually exclusive with 'anti_affinity_policy_name'
+ required: False
+ default: None
+ anti_affinity_policy_name:
+ description:
+ - The anti affinity policy name to be set for a hyper scale server.
+ This is mutually exclusive with 'anti_affinity_policy_id'
+ required: False
+ default: None
+ alert_policy_id:
+ description:
+ - The alert policy id to be associated to the server.
+ This is mutually exclusive with 'alert_policy_name'
+ required: False
+ default: None
+ alert_policy_name:
+ description:
+ - The alert policy name to be associated to the server.
+ This is mutually exclusive with 'alert_policy_id'
+ required: False
+ default: None
+ state:
+ description:
+      - The state to ensure that the provided resources are in.
+ default: 'present'
+ required: False
+ choices: ['present', 'absent']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ default: True
+ required: False
+ choices: [ True, False]
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+    - CLC_V2_API_PASSWD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD environment variables before running these examples
+
+- name: set the cpu count to 4 on a server
+ clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ cpu: 4
+ state: present
+
+- name: set the memory to 8GB on a server
+ clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ memory: 8
+ state: present
+
+- name: set the anti affinity policy on a server
+ clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ anti_affinity_policy_name: 'aa_policy'
+ state: present
+
+- name: remove the anti affinity policy on a server
+ clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ anti_affinity_policy_name: 'aa_policy'
+ state: absent
+
+- name: add the alert policy on a server
+ clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ alert_policy_name: 'alert_policy'
+ state: present
+
+- name: remove the alert policy on a server
+ clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ alert_policy_name: 'alert_policy'
+ state: absent
+
+- name: set the memory to 16GB and cpu to 8 core on a list of servers
+ clc_modify_server:
+ server_ids:
+ - UC1TESTSVR01
+ - UC1TESTSVR02
+ cpu: 8
+ memory: 16
+ state: present
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+servers:
+ description: The list of server objects that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":1438196820,
+ "modifiedBy":"service.wfad",
+ "modifiedDate":1438196820
+ },
+ "description":"test-server",
+ "details":{
+ "alertPolicies":[
+
+ ],
+ "cpu":1,
+ "customFields":[
+
+ ],
+ "diskCount":3,
+ "disks":[
+ {
+ "id":"0:0",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":1
+ },
+ {
+ "id":"0:1",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":2
+ },
+ {
+ "id":"0:2",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":14
+ }
+ ],
+ "hostName":"",
+ "inMaintenanceMode":false,
+ "ipAddresses":[
+ {
+ "internal":"10.1.1.1"
+ }
+ ],
+ "memoryGB":1,
+ "memoryMB":1024,
+ "partitions":[
+
+ ],
+ "powerState":"started",
+ "snapshots":[
+
+ ],
+ "storageGB":17
+ },
+ "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"test-server",
+ "ipaddress":"10.120.45.23",
+ "isTemplate":false,
+ "links":[
+ {
+ "href":"/v2/servers/wfad/test-server",
+ "id":"test-server",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"group"
+ },
+ {
+ "href":"/v2/accounts/wfad",
+ "id":"wfad",
+ "rel":"account"
+ },
+ {
+ "href":"/v2/billing/wfad/serverPricing/test-server",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/publicIPAddresses",
+ "rel":"publicIPAddresses",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/credentials",
+ "rel":"credentials"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/capabilities",
+ "rel":"capabilities"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/alertPolicies",
+ "rel":"alertPolicyMappings",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
+ "rel":"antiAffinityPolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
+ "rel":"cpuAutoscalePolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test-server",
+ "os":"ubuntu14_64Bit",
+ "osType":"Ubuntu 14 64-bit",
+ "status":"active",
+ "storageType":"standard",
+ "type":"standard"
+ }
+ ]
+'''
+
+__version__ = '${version}'
+
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
+class ClcModifyServer:
+ clc = clc_sdk
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+
+ if not CLC_FOUND:
+ self.module.fail_json(
+ msg='clc-python-sdk required for this module')
+ if not REQUESTS_FOUND:
+ self.module.fail_json(
+ msg='requests library is required for this module')
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ self._set_clc_credentials_from_env()
+
+ p = self.module.params
+ cpu = p.get('cpu')
+ memory = p.get('memory')
+ state = p.get('state')
+ if state == 'absent' and (cpu or memory):
+ return self.module.fail_json(
+ msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments')
+
+ server_ids = p['server_ids']
+ if not isinstance(server_ids, list):
+ return self.module.fail_json(
+ msg='server_ids needs to be a list of instances to modify: %s' %
+ server_ids)
+
+ (changed, server_dict_array, changed_server_ids) = self._modify_servers(
+ server_ids=server_ids)
+
+ self.module.exit_json(
+ changed=changed,
+ server_ids=changed_server_ids,
+ servers=server_dict_array)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ cpu=dict(),
+ memory=dict(),
+ anti_affinity_policy_id=dict(),
+ anti_affinity_policy_name=dict(),
+ alert_policy_id=dict(),
+ alert_policy_name=dict(),
+ wait=dict(type='bool', default=True)
+ )
+ mutually_exclusive = [
+ ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
+ ['alert_policy_id', 'alert_policy_name']
+ ]
+ return {"argument_spec": argument_spec,
+ "mutually_exclusive": mutually_exclusive}
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: The list of server ids
+ :param message: the error message to throw in case of any error
+ :return the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ return self.module.fail_json(msg=message + ': %s' % ex.message)
+
+ def _modify_servers(self, server_ids):
+ """
+ modify the servers configuration on the provided list
+ :param server_ids: list of servers to modify
+ :return: a list of dictionaries with server information about the servers that were modified
+ """
+ p = self.module.params
+ state = p.get('state')
+ server_params = {
+ 'cpu': p.get('cpu'),
+ 'memory': p.get('memory'),
+ 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
+ 'anti_affinity_policy_name': p.get('anti_affinity_policy_name'),
+ 'alert_policy_id': p.get('alert_policy_id'),
+ 'alert_policy_name': p.get('alert_policy_name'),
+ }
+ changed = False
+ server_changed = False
+ aa_changed = False
+ ap_changed = False
+ server_dict_array = []
+ result_server_ids = []
+ request_list = []
+ changed_servers = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return self.module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ for server in servers:
+ if state == 'present':
+ server_changed, server_result = self._ensure_server_config(
+ server, server_params)
+ if server_result:
+ request_list.append(server_result)
+ aa_changed = self._ensure_aa_policy_present(
+ server,
+ server_params)
+ ap_changed = self._ensure_alert_policy_present(
+ server,
+ server_params)
+ elif state == 'absent':
+ aa_changed = self._ensure_aa_policy_absent(
+ server,
+ server_params)
+ ap_changed = self._ensure_alert_policy_absent(
+ server,
+ server_params)
+ if server_changed or aa_changed or ap_changed:
+ changed_servers.append(server)
+ changed = True
+
+ self._wait_for_requests(self.module, request_list)
+ self._refresh_servers(self.module, changed_servers)
+
+ for server in changed_servers:
+ server_dict_array.append(server.data)
+ result_server_ids.append(server.id)
+
+ return changed, server_dict_array, result_server_ids
+
+ def _ensure_server_config(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided cpu and memory
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: (changed, group) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ cpu = server_params.get('cpu')
+ memory = server_params.get('memory')
+ changed = False
+ result = None
+
+ if not cpu:
+ cpu = server.cpu
+ if not memory:
+ memory = server.memory
+ if memory != server.memory or cpu != server.cpu:
+ if not self.module.check_mode:
+ result = self._modify_clc_server(
+ self.clc,
+ self.module,
+ server.id,
+ cpu,
+ memory)
+ changed = True
+ return changed, result
+
+ @staticmethod
+ def _modify_clc_server(clc, module, server_id, cpu, memory):
+ """
+ Modify the memory or CPU of a clc server.
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param server_id: id of the server to modify
+ :param cpu: the new cpu value
+ :param memory: the new memory value
+ :return: the result of CLC API call
+ """
+ result = None
+ acct_alias = clc.v2.Account.GetAlias()
+ try:
+ # Update the server configuration
+ job_obj = clc.v2.API.Call('PATCH',
+ 'servers/%s/%s' % (acct_alias,
+ server_id),
+ json.dumps([{"op": "set",
+ "member": "memory",
+ "value": memory},
+ {"op": "set",
+ "member": "cpu",
+ "value": cpu}]))
+ result = clc.v2.Requests(job_obj)
+ except APIFailedResponse as ex:
+ module.fail_json(
+ msg='Unable to update the server configuration for server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _wait_for_requests(module, request_list):
+ """
+ Block until server provisioning requests are completed.
+ :param module: the AnsibleModule object
+ :param request_list: a list of clc-sdk.Request instances
+ :return: none
+ """
+ wait = module.params.get('wait')
+ if wait:
+ # Requests.WaitUntilComplete() returns the count of failed requests
+ failed_requests_count = sum(
+ [request.WaitUntilComplete() for request in request_list])
+
+ if failed_requests_count > 0:
+ module.fail_json(
+ msg='Unable to process modify server request')
+
+ @staticmethod
+ def _refresh_servers(module, servers):
+ """
+ Loop through a list of servers and refresh them.
+ :param module: the AnsibleModule object
+ :param servers: list of clc-sdk.Server instances to refresh
+ :return: none
+ """
+ for server in servers:
+ try:
+ server.Refresh()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
+ server.id, ex.message
+ ))
+
+ def _ensure_aa_policy_present(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided anti affinity policy
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: (changed, group) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+
+ aa_policy_id = server_params.get('anti_affinity_policy_id')
+ aa_policy_name = server_params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ aa_policy_id = self._get_aa_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ aa_policy_name)
+ current_aa_policy_id = self._get_aa_policy_id_of_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+
+ if aa_policy_id and aa_policy_id != current_aa_policy_id:
+ self._modify_aa_policy(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ aa_policy_id)
+ changed = True
+ return changed
+
+ def _ensure_aa_policy_absent(
+ self, server, server_params):
+ """
+        ensures the provided anti affinity policy is removed from the server
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: (changed, group) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+ aa_policy_id = server_params.get('anti_affinity_policy_id')
+ aa_policy_name = server_params.get('anti_affinity_policy_name')
+ if not aa_policy_id and aa_policy_name:
+ aa_policy_id = self._get_aa_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ aa_policy_name)
+ current_aa_policy_id = self._get_aa_policy_id_of_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+
+ if aa_policy_id and aa_policy_id == current_aa_policy_id:
+ self._delete_aa_policy(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id)
+ changed = True
+ return changed
+
+ @staticmethod
+ def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id):
+ """
+ modifies the anti affinity policy of the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param aa_policy_id: the anti affinity policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('PUT',
+ 'servers/%s/%s/antiAffinityPolicy' % (
+ acct_alias,
+ server_id),
+ json.dumps({"id": aa_policy_id}))
+ except APIFailedResponse as ex:
+ module.fail_json(
+ msg='Unable to modify anti affinity policy to server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _delete_aa_policy(clc, module, acct_alias, server_id):
+ """
+ Delete the anti affinity policy of the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('DELETE',
+ 'servers/%s/%s/antiAffinityPolicy' % (
+ acct_alias,
+ server_id),
+ json.dumps({}))
+ except APIFailedResponse as ex:
+ module.fail_json(
+ msg='Unable to delete anti affinity policy to server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name):
+ """
+ retrieves the anti affinity policy id of the server based on the name of the policy
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param aa_policy_name: the anti affinity policy name
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ aa_policies = clc.v2.API.Call(method='GET',
+ url='antiAffinityPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(
+ msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format(
+ alias, str(ex.response_text)))
+ for aa_policy in aa_policies.get('items'):
+ if aa_policy.get('name') == aa_policy_name:
+ if not aa_policy_id:
+ aa_policy_id = aa_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
+ if not aa_policy_id:
+ module.fail_json(
+ msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
+ return aa_policy_id
+
+ @staticmethod
+ def _get_aa_policy_id_of_server(clc, module, alias, server_id):
+ """
+ retrieves the anti affinity policy id of the server based on the CLC server id
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param server_id: the CLC server id
+ :return: aa_policy_id: The anti affinity policy id
+ """
+ aa_policy_id = None
+ try:
+ result = clc.v2.API.Call(
+ method='GET', url='servers/%s/%s/antiAffinityPolicy' %
+ (alias, server_id))
+ aa_policy_id = result.get('id')
+ except APIFailedResponse as ex:
+ if ex.response_status_code != 404:
+ module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return aa_policy_id
+
+ def _ensure_alert_policy_present(
+ self, server, server_params):
+ """
+ ensures the server is updated with the provided alert policy
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: (changed, group) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+ acct_alias = self.clc.v2.Account.GetAlias()
+ alert_policy_id = server_params.get('alert_policy_id')
+ alert_policy_name = server_params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ alert_policy_name)
+ if alert_policy_id and not self._alert_policy_exists(
+ server, alert_policy_id):
+ self._add_alert_policy_to_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ alert_policy_id)
+ changed = True
+ return changed
+
+ def _ensure_alert_policy_absent(
+ self, server, server_params):
+ """
+ ensures the alert policy is removed from the server
+ :param server: the CLC server object
+ :param server_params: the dictionary of server parameters
+ :return: (changed, group) -
+ changed: Boolean whether a change was made
+ result: The result from the CLC API call
+ """
+ changed = False
+
+ acct_alias = self.clc.v2.Account.GetAlias()
+ alert_policy_id = server_params.get('alert_policy_id')
+ alert_policy_name = server_params.get('alert_policy_name')
+ if not alert_policy_id and alert_policy_name:
+ alert_policy_id = self._get_alert_policy_id_by_name(
+ self.clc,
+ self.module,
+ acct_alias,
+ alert_policy_name)
+
+ if alert_policy_id and self._alert_policy_exists(
+ server, alert_policy_id):
+ self._remove_alert_policy_to_server(
+ self.clc,
+ self.module,
+ acct_alias,
+ server.id,
+ alert_policy_id)
+ changed = True
+ return changed
+
+ @staticmethod
+ def _add_alert_policy_to_server(
+ clc, module, acct_alias, server_id, alert_policy_id):
+ """
+ add the alert policy to CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param alert_policy_id: the alert policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('POST',
+ 'servers/%s/%s/alertPolicies' % (
+ acct_alias,
+ server_id),
+ json.dumps({"id": alert_policy_id}))
+ except APIFailedResponse as ex:
+ module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _remove_alert_policy_to_server(
+ clc, module, acct_alias, server_id, alert_policy_id):
+ """
+        remove the alert policy from the CLC server
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param acct_alias: the CLC account alias
+ :param server_id: the CLC server id
+ :param alert_policy_id: the alert policy id
+ :return: result: The result from the CLC API call
+ """
+ result = None
+ if not module.check_mode:
+ try:
+ result = clc.v2.API.Call('DELETE',
+ 'servers/%s/%s/alertPolicies/%s'
+ % (acct_alias, server_id, alert_policy_id))
+ except APIFailedResponse as ex:
+ module.fail_json(msg='Unable to remove alert policy from the server : "{0}". {1}'.format(
+ server_id, str(ex.response_text)))
+ return result
+
+ @staticmethod
+ def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
+ """
+ retrieves the alert policy id of the server based on the name of the policy
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param alert_policy_name: the alert policy name
+ :return: alert_policy_id: The alert policy id
+ """
+ alert_policy_id = None
+ try:
+ alert_policies = clc.v2.API.Call(method='GET',
+ url='alertPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format(
+ alias, str(ex.response_text)))
+ for alert_policy in alert_policies.get('items'):
+ if alert_policy.get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = alert_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _alert_policy_exists(server, alert_policy_id):
+ """
+ Checks if the alert policy exists for the server
+ :param server: the clc server object
+ :param alert_policy_id: the alert policy
+ :return: True: if the given alert policy id associated to the server, False otherwise
+ """
+ result = False
+ alert_policies = server.alertPolicies
+ if alert_policies:
+ for alert_policy in alert_policies:
+ if alert_policy.get('id') == alert_policy_id:
+ result = True
+ return result
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+
+ argument_dict = ClcModifyServer._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_modify_server = ClcModifyServer(module)
+ clc_modify_server.process_request()
+
+from ansible.module_utils.basic import * # pylint: disable=W0614
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/centurylink/clc_publicip.py b/lib/ansible/modules/cloud/centurylink/clc_publicip.py
new file mode 100644
index 0000000000..a53aeb7953
--- /dev/null
+++ b/lib/ansible/modules/cloud/centurylink/clc_publicip.py
@@ -0,0 +1,372 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: clc_publicip
+short_description: Add and Delete public ips on servers in CenturyLink Cloud.
+description:
+ - An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud.
+version_added: "2.0"
+options:
+ protocol:
+ description:
+ - The protocol that the public IP will listen for.
+ default: TCP
+ choices: ['TCP', 'UDP', 'ICMP']
+ required: False
+ ports:
+ description:
+ - A list of ports to expose. This is required when state is 'present'
+ required: False
+ default: None
+ server_ids:
+ description:
+ - A list of servers to create public ips on.
+ required: True
+ state:
+ description:
+ - Determine whether to create or delete public IPs. If present module will not create a second public ip if one
+ already exists.
+ default: present
+ choices: ['present', 'absent']
+ required: False
+ wait:
+ description:
+ - Whether to wait for the tasks to finish before returning.
+ choices: [ True, False ]
+ default: True
+ required: False
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
+
+- name: Add Public IP to Server
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+ - name: Create Public IP For Servers
+ clc_publicip:
+ protocol: TCP
+ ports:
+ - 80
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ state: present
+ register: clc
+
+ - name: debug
+ debug:
+ var: clc
+
+- name: Delete Public IP from Server
+ hosts: localhost
+ gather_facts: False
+ connection: local
+ tasks:
+    - name: Delete Public IP For Servers
+ clc_publicip:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ state: absent
+ register: clc
+
+ - name: debug
+ debug:
+ var: clc
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+'''
+
+__version__ = '${version}'
+
+import os
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+from ansible.module_utils.basic import AnsibleModule
+
+
class ClcPublicIp(object):
    """Add or remove public ip addresses on CenturyLink Cloud servers."""

    clc = clc_sdk
    module = None

    def __init__(self, module):
        """
        Construct module, verifying that the runtime dependencies
        (clc-sdk and requests >= 2.5.0) are importable.
        :param module: the AnsibleModule wrapping this invocation
        """
        self.module = module
        if not CLC_FOUND:
            self.module.fail_json(
                msg='clc-python-sdk required for this module')
        if not REQUESTS_FOUND:
            self.module.fail_json(
                msg='requests library is required for this module')
        if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
            self.module.fail_json(
                msg='requests library version should be >= 2.5.0')

        self._set_user_agent(self.clc)

    def process_request(self):
        """
        Process the request - Main Code Path
        :return: Returns with either an exit_json or fail_json
        """
        self._set_clc_credentials_from_env()
        params = self.module.params
        server_ids = params['server_ids']
        ports = params['ports']
        protocol = params['protocol']
        state = params['state']

        if state == 'present':
            # ports is documented as required when state is 'present'; fail
            # with a clear message instead of raising a TypeError while
            # iterating None to build the port list.
            if not ports:
                return self.module.fail_json(
                    msg='ports is required when state is present')
            # NOTE: local renamed from 'requests' (original shadowed the
            # imported requests library) to 'request_list'.
            changed, changed_server_ids, request_list = self.ensure_public_ip_present(
                server_ids=server_ids, protocol=protocol, ports=ports)
        elif state == 'absent':
            changed, changed_server_ids, request_list = self.ensure_public_ip_absent(
                server_ids=server_ids)
        else:
            return self.module.fail_json(msg="Unknown State: " + state)
        self._wait_for_requests_to_complete(request_list)
        return self.module.exit_json(changed=changed,
                                     server_ids=changed_server_ids)

    @staticmethod
    def _define_module_argument_spec():
        """
        Define the argument spec for the ansible module
        :return: argument spec dictionary
        """
        argument_spec = dict(
            server_ids=dict(type='list', required=True),
            protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
            ports=dict(type='list'),
            wait=dict(type='bool', default=True),
            state=dict(default='present', choices=['present', 'absent']),
        )
        return argument_spec

    def ensure_public_ip_present(self, server_ids, protocol, ports):
        """
        Ensures the given server ids having the public ip available
        :param server_ids: the list of server ids
        :param protocol: the ip protocol
        :param ports: the list of ports to expose
        :return: (changed, changed_server_ids, results)
                 changed: A flag indicating if there is any change
                 changed_server_ids : the list of server ids that are changed
                 results: The result list from clc public ip call
        """
        changed = False
        results = []
        changed_server_ids = []
        servers = self._get_servers_from_clc(
            server_ids,
            'Failed to obtain server list from the CLC API')
        # Only touch servers that do not already have a public ip, so a
        # second run with the same arguments is a no-op (idempotence).
        servers_to_change = [
            server for server in servers if len(
                server.PublicIPs().public_ips) == 0]
        ports_to_expose = [{'protocol': protocol, 'port': port}
                           for port in ports]
        for server in servers_to_change:
            if not self.module.check_mode:
                result = self._add_publicip_to_server(server, ports_to_expose)
                results.append(result)
            # changed is reported even in check mode so --check shows intent.
            changed_server_ids.append(server.id)
            changed = True
        return changed, changed_server_ids, results

    def _add_publicip_to_server(self, server, ports_to_expose):
        """
        Add a public ip exposing the given ports to a single server.
        :param server: the clc server object to modify
        :param ports_to_expose: list of {'protocol': ..., 'port': ...} dicts
        :return: the clc request object for the add operation
        """
        result = None
        try:
            result = server.PublicIPs().Add(ports_to_expose)
        except CLCException as ex:
            self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format(
                server.id, ex.response_text
            ))
        return result

    def ensure_public_ip_absent(self, server_ids):
        """
        Ensures the given server ids having the public ip removed if there is any
        :param server_ids: the list of server ids
        :return: (changed, changed_server_ids, results)
                 changed: A flag indicating if there is any change
                 changed_server_ids : the list of server ids that are changed
                 results: The result list from clc public ip call
        """
        changed = False
        results = []
        changed_server_ids = []
        servers = self._get_servers_from_clc(
            server_ids,
            'Failed to obtain server list from the CLC API')
        # Only touch servers that actually have a public ip (idempotence).
        servers_to_change = [
            server for server in servers if len(
                server.PublicIPs().public_ips) > 0]
        for server in servers_to_change:
            if not self.module.check_mode:
                result = self._remove_publicip_from_server(server)
                results.append(result)
            changed_server_ids.append(server.id)
            changed = True
        return changed, changed_server_ids, results

    def _remove_publicip_from_server(self, server):
        """
        Delete every public ip attached to a single server.
        :param server: the clc server object to modify
        :return: the clc request object of the last delete operation
        """
        result = None
        try:
            for ip_address in server.PublicIPs().public_ips:
                result = ip_address.Delete()
        except CLCException as ex:
            self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
                server.id, ex.response_text
            ))
        return result

    def _wait_for_requests_to_complete(self, requests_lst):
        """
        Waits until the CLC requests are complete if the wait argument is True
        :param requests_lst: The list of CLC request objects
        :return: none
        """
        if not self.module.params['wait']:
            return
        for request in requests_lst:
            request.WaitUntilComplete()
            for request_details in request.requests:
                if request_details.Status() != 'succeeded':
                    self.module.fail_json(
                        msg='Unable to process public ip request')

    def _set_clc_credentials_from_env(self):
        """
        Set the CLC Credentials on the sdk by reading environment variables
        :return: none
        """
        env = os.environ
        v2_api_token = env.get('CLC_V2_API_TOKEN', False)
        v2_api_username = env.get('CLC_V2_API_USERNAME', False)
        v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
        clc_alias = env.get('CLC_ACCT_ALIAS', False)
        api_url = env.get('CLC_V2_API_URL', False)

        if api_url:
            self.clc.defaults.ENDPOINT_URL_V2 = api_url

        # A pre-generated token + alias wins over username/password.
        if v2_api_token and clc_alias:
            self.clc._LOGIN_TOKEN_V2 = v2_api_token
            self.clc._V2_ENABLED = True
            self.clc.ALIAS = clc_alias
        elif v2_api_username and v2_api_passwd:
            self.clc.v2.SetCredentials(
                api_username=v2_api_username,
                api_passwd=v2_api_passwd)
        else:
            return self.module.fail_json(
                msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                    "environment variables")

    def _get_servers_from_clc(self, server_ids, message):
        """
        Gets list of servers from CLC api
        :param server_ids: the list of server ids to look up
        :param message: error message prefix used when the lookup fails
        :return: list of clc server objects
        """
        try:
            return self.clc.v2.Servers(server_ids).servers
        except CLCException as exception:
            self.module.fail_json(msg=message + ': %s' % exception)

    @staticmethod
    def _set_user_agent(clc):
        # Tag outgoing API traffic with this module's identity so CLC can
        # attribute requests; only possible when the sdk exposes the hook.
        if hasattr(clc, 'SetRequestsSession'):
            agent_string = "ClcAnsibleModule/" + __version__
            ses = requests.Session()
            ses.headers.update({"Api-Client": agent_string})
            ses.headers['User-Agent'] += " " + agent_string
            clc.SetRequestsSession(ses)
+
+
def main():
    """
    Module entry point: build the AnsibleModule from the shared argument
    spec and hand control to ClcPublicIp.
    :return: none
    """
    spec = ClcPublicIp._define_module_argument_spec()
    module = AnsibleModule(argument_spec=spec, supports_check_mode=True)
    ClcPublicIp(module).process_request()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/centurylink/clc_server.py b/lib/ansible/modules/cloud/centurylink/clc_server.py
new file mode 100644
index 0000000000..721582cc33
--- /dev/null
+++ b/lib/ansible/modules/cloud/centurylink/clc_server.py
@@ -0,0 +1,1592 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: clc_server
+short_description: Create, Delete, Start and Stop servers in CenturyLink Cloud.
+description:
+ - An Ansible module to Create, Delete, Start and Stop servers in CenturyLink Cloud.
+version_added: "2.0"
+options:
+ additional_disks:
+ description:
+ - The list of additional disks for the server
+ required: False
+ default: []
+ add_public_ip:
+ description:
+ - Whether to add a public ip to the server
+ required: False
+ default: False
+ choices: [False, True]
+ alias:
+ description:
+ - The account alias to provision the servers under.
+ required: False
+ default: None
+ anti_affinity_policy_id:
+ description:
+ - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_name'.
+ required: False
+ default: None
+ anti_affinity_policy_name:
+ description:
+ - The anti-affinity policy to assign to the server. This is mutually exclusive with 'anti_affinity_policy_id'.
+ required: False
+ default: None
+ alert_policy_id:
+ description:
+ - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_name'.
+ required: False
+ default: None
+ alert_policy_name:
+ description:
+ - The alert policy to assign to the server. This is mutually exclusive with 'alert_policy_id'.
+ required: False
+ default: None
+ count:
+ description:
+ - The number of servers to build (mutually exclusive with exact_count)
+ required: False
+ default: 1
+ count_group:
+ description:
+      - Required when exact_count is specified. The Server Group used to determine how many servers to deploy.
+ required: False
+ default: None
+ cpu:
+ description:
+ - How many CPUs to provision on the server
+ default: 1
+ required: False
+ cpu_autoscale_policy_id:
+ description:
+ - The autoscale policy to assign to the server.
+ default: None
+ required: False
+ custom_fields:
+ description:
+ - The list of custom fields to set on the server.
+ default: []
+ required: False
+ description:
+ description:
+ - The description to set for the server.
+ default: None
+ required: False
+ exact_count:
+ description:
+      - Run in idempotent mode. Will ensure that this exact number of servers are running in the provided group,
+ creating and deleting them to reach that count. Requires count_group to be set.
+ default: None
+ required: False
+ group:
+ description:
+ - The Server Group to create servers under.
+ default: 'Default Group'
+ required: False
+ ip_address:
+ description:
+ - The IP Address for the server. One is assigned if not provided.
+ default: None
+ required: False
+ location:
+ description:
+ - The Datacenter to create servers in.
+ default: None
+ required: False
+ managed_os:
+ description:
+ - Whether to create the server as 'Managed' or not.
+ default: False
+ required: False
+ choices: [True, False]
+ memory:
+ description:
+ - Memory in GB.
+ default: 1
+ required: False
+ name:
+ description:
+ - A 1 to 6 character identifier to use for the server. This is required when state is 'present'
+ default: None
+ required: False
+ network_id:
+ description:
+ - The network UUID on which to create servers.
+ default: None
+ required: False
+ packages:
+ description:
+ - The list of blue print packages to run on the server after its created.
+ default: []
+ required: False
+ password:
+ description:
+ - Password for the administrator / root user
+ default: None
+ required: False
+ primary_dns:
+ description:
+ - Primary DNS used by the server.
+ default: None
+ required: False
+ public_ip_protocol:
+ description:
+ - The protocol to use for the public ip if add_public_ip is set to True.
+ default: 'TCP'
+ choices: ['TCP', 'UDP', 'ICMP']
+ required: False
+ public_ip_ports:
+ description:
+ - A list of ports to allow on the firewall to the servers public ip, if add_public_ip is set to True.
+ default: []
+ required: False
+ secondary_dns:
+ description:
+ - Secondary DNS used by the server.
+ default: None
+ required: False
+ server_ids:
+ description:
+ - Required for started, stopped, and absent states.
+        A list of server Ids to ensure are started, stopped, or absent.
+ default: []
+ required: False
+ source_server_password:
+ description:
+ - The password for the source server if a clone is specified.
+ default: None
+ required: False
+ state:
+ description:
+      - The state to ensure that the provided resources are in.
+ default: 'present'
+ required: False
+ choices: ['present', 'absent', 'started', 'stopped']
+ storage_type:
+ description:
+ - The type of storage to attach to the server.
+ default: 'standard'
+ required: False
+ choices: ['standard', 'hyperscale']
+ template:
+ description:
+ - The template to use for server creation. Will search for a template if a partial string is provided.
+ This is required when state is 'present'
+ default: None
+ required: False
+ ttl:
+ description:
+ - The time to live for the server in seconds. The server will be deleted when this time expires.
+ default: None
+ required: False
+ type:
+ description:
+ - The type of server to create.
+ default: 'standard'
+ required: False
+ choices: ['standard', 'hyperscale', 'bareMetal']
+ configuration_id:
+ description:
+ - Only required for bare metal servers.
+ Specifies the identifier for the specific configuration type of bare metal server to deploy.
+ default: None
+ required: False
+ os_type:
+ description:
+ - Only required for bare metal servers.
+ Specifies the OS to provision with the bare metal server.
+ default: None
+ required: False
+ choices: ['redHat6_64Bit', 'centOS6_64Bit', 'windows2012R2Standard_64Bit', 'ubuntu14_64Bit']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ default: True
+ required: False
+ choices: [True, False]
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
+
+- name: Provision a single Ubuntu Server
+ clc_server:
+ name: test
+ template: ubuntu-14-64
+ count: 1
+ group: Default Group
+ state: present
+
+- name: Ensure 'Default Group' has exactly 5 servers
+ clc_server:
+ name: test
+ template: ubuntu-14-64
+ exact_count: 5
+ count_group: Default Group
+ group: Default Group
+
+- name: Stop a Server
+ clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: stopped
+
+- name: Start a Server
+ clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: started
+
+- name: Delete a Server
+ clc_server:
+ server_ids:
+ - UC1ACCT-TEST01
+ state: absent
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are created
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+partially_created_server_ids:
+ description: The list of server ids that are partially created
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+servers:
+ description: The list of server objects returned from CLC
+ returned: success
+ type: list
+ sample:
+ [
+ {
+ "changeInfo":{
+ "createdBy":"service.wfad",
+ "createdDate":1438196820,
+ "modifiedBy":"service.wfad",
+ "modifiedDate":1438196820
+ },
+ "description":"test-server",
+ "details":{
+ "alertPolicies":[
+
+ ],
+ "cpu":1,
+ "customFields":[
+
+ ],
+ "diskCount":3,
+ "disks":[
+ {
+ "id":"0:0",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":1
+ },
+ {
+ "id":"0:1",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":2
+ },
+ {
+ "id":"0:2",
+ "partitionPaths":[
+
+ ],
+ "sizeGB":14
+ }
+ ],
+ "hostName":"",
+ "inMaintenanceMode":false,
+ "ipAddresses":[
+ {
+ "internal":"10.1.1.1"
+ }
+ ],
+ "memoryGB":1,
+ "memoryMB":1024,
+ "partitions":[
+
+ ],
+ "powerState":"started",
+ "snapshots":[
+
+ ],
+ "storageGB":17
+ },
+ "groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"test-server",
+ "ipaddress":"10.120.45.23",
+ "isTemplate":false,
+ "links":[
+ {
+ "href":"/v2/servers/wfad/test-server",
+ "id":"test-server",
+ "rel":"self",
+ "verbs":[
+ "GET",
+ "PATCH",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
+ "id":"086ac1dfe0b6411989e8d1b77c4065f0",
+ "rel":"group"
+ },
+ {
+ "href":"/v2/accounts/wfad",
+ "id":"wfad",
+ "rel":"account"
+ },
+ {
+ "href":"/v2/billing/wfad/serverPricing/test-server",
+ "rel":"billing"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/publicIPAddresses",
+ "rel":"publicIPAddresses",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/credentials",
+ "rel":"credentials"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/statistics",
+ "rel":"statistics"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
+ "rel":"upcomingScheduledActivities"
+ },
+ {
+ "href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
+ "rel":"scheduledActivities",
+ "verbs":[
+ "GET",
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/capabilities",
+ "rel":"capabilities"
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/alertPolicies",
+ "rel":"alertPolicyMappings",
+ "verbs":[
+ "POST"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
+ "rel":"antiAffinityPolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ },
+ {
+ "href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
+ "rel":"cpuAutoscalePolicyMapping",
+ "verbs":[
+ "PUT",
+ "DELETE"
+ ]
+ }
+ ],
+ "locationId":"UC1",
+ "name":"test-server",
+ "os":"ubuntu14_64Bit",
+ "osType":"Ubuntu 14 64-bit",
+ "status":"active",
+ "storageType":"standard",
+ "type":"standard"
+ }
+ ]
+'''
+
+__version__ = '${version}'
+
+from time import sleep
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+ from clc import APIFailedResponse
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
+class ClcServer:
+ clc = clc_sdk
+
+ def __init__(self, module):
+ """
+ Construct module
+ """
+ self.clc = clc_sdk
+ self.module = module
+ self.group_dict = {}
+
+ if not CLC_FOUND:
+ self.module.fail_json(
+ msg='clc-python-sdk required for this module')
+ if not REQUESTS_FOUND:
+ self.module.fail_json(
+ msg='requests library is required for this module')
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
    def process_request(self):
        """
        Process the request - Main Code Path

        Dispatches on the 'state' parameter: 'absent' deletes the listed
        servers, 'started'/'stopped' changes their power state, and
        'present' provisions servers (either a fixed count, or exact_count
        enforced against a group).
        :return: Returns with either an exit_json or fail_json
        """
        changed = False
        new_server_ids = []
        server_dict_array = []

        # Credentials come from the environment; params are normalised and
        # defaulted (alias, cpu, memory, template, group id, ...) before
        # any state handling runs.
        self._set_clc_credentials_from_env()
        self.module.params = self._validate_module_params(
            self.clc,
            self.module)
        p = self.module.params
        state = p.get('state')

        #
        # Handle each state
        #
        partial_servers_ids = []
        if state == 'absent':
            server_ids = p['server_ids']
            if not isinstance(server_ids, list):
                return self.module.fail_json(
                    msg='server_ids needs to be a list of instances to delete: %s' %
                    server_ids)

            (changed,
             server_dict_array,
             new_server_ids) = self._delete_servers(module=self.module,
                                                    clc=self.clc,
                                                    server_ids=server_ids)

        elif state in ('started', 'stopped'):
            server_ids = p.get('server_ids')
            if not isinstance(server_ids, list):
                return self.module.fail_json(
                    msg='server_ids needs to be a list of servers to run: %s' %
                    server_ids)

            (changed,
             server_dict_array,
             new_server_ids) = self._start_stop_servers(self.module,
                                                        self.clc,
                                                        server_ids)

        elif state == 'present':
            # Changed is always set to true when provisioning new instances
            # template is mandatory for VM creation; bare metal servers are
            # built from configuration_id/os_type instead.
            if not p.get('template') and p.get('type') != 'bareMetal':
                return self.module.fail_json(
                    msg='template parameter is required for new instance')

            if p.get('exact_count') is None:
                (server_dict_array,
                 new_server_ids,
                 partial_servers_ids,
                 changed) = self._create_servers(self.module,
                                                 self.clc)
            else:
                # exact_count mode: converge the count_group to the
                # requested number of servers, creating or deleting.
                (server_dict_array,
                 new_server_ids,
                 partial_servers_ids,
                 changed) = self._enforce_count(self.module,
                                                self.clc)

        self.module.exit_json(
            changed=changed,
            server_ids=new_server_ids,
            partially_created_server_ids=partial_servers_ids,
            servers=server_dict_array)
+
+ @staticmethod
+ def _define_module_argument_spec():
+ """
+ Define the argument spec for the ansible module
+ :return: argument spec dictionary
+ """
+ argument_spec = dict(
+ name=dict(),
+ template=dict(),
+ group=dict(default='Default Group'),
+ network_id=dict(),
+ location=dict(default=None),
+ cpu=dict(default=1),
+ memory=dict(default=1),
+ alias=dict(default=None),
+ password=dict(default=None, no_log=True),
+ ip_address=dict(default=None),
+ storage_type=dict(
+ default='standard',
+ choices=[
+ 'standard',
+ 'hyperscale']),
+ type=dict(default='standard', choices=['standard', 'hyperscale', 'bareMetal']),
+ primary_dns=dict(default=None),
+ secondary_dns=dict(default=None),
+ additional_disks=dict(type='list', default=[]),
+ custom_fields=dict(type='list', default=[]),
+ ttl=dict(default=None),
+ managed_os=dict(type='bool', default=False),
+ description=dict(default=None),
+ source_server_password=dict(default=None),
+ cpu_autoscale_policy_id=dict(default=None),
+ anti_affinity_policy_id=dict(default=None),
+ anti_affinity_policy_name=dict(default=None),
+ alert_policy_id=dict(default=None),
+ alert_policy_name=dict(default=None),
+ packages=dict(type='list', default=[]),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'started',
+ 'stopped']),
+ count=dict(type='int', default=1),
+ exact_count=dict(type='int', default=None),
+ count_group=dict(),
+ server_ids=dict(type='list', default=[]),
+ add_public_ip=dict(type='bool', default=False),
+ public_ip_protocol=dict(
+ default='TCP',
+ choices=[
+ 'TCP',
+ 'UDP',
+ 'ICMP']),
+ public_ip_ports=dict(type='list', default=[]),
+ configuration_id=dict(default=None),
+ os_type=dict(default=None,
+ choices=[
+ 'redHat6_64Bit',
+ 'centOS6_64Bit',
+ 'windows2012R2Standard_64Bit',
+ 'ubuntu14_64Bit'
+ ]),
+ wait=dict(type='bool', default=True))
+
+ mutually_exclusive = [
+ ['exact_count', 'count'],
+ ['exact_count', 'state'],
+ ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
+ ['alert_policy_id', 'alert_policy_name'],
+ ]
+ return {"argument_spec": argument_spec,
+ "mutually_exclusive": mutually_exclusive}
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
    @staticmethod
    def _validate_module_params(clc, module):
        """
        Validate the module params, and lookup default values.

        Mutates module.params in place: fills in alias, cpu, memory,
        description, ttl, template id, group id, network id and the
        anti-affinity / alert policy ids by querying the CLC API where the
        user did not supply them.
        :param clc: clc-sdk instance to use
        :param module: module to validate
        :return: dictionary of validated params
        """
        params = module.params
        # The datacenter anchors the template/group/network lookups below.
        datacenter = ClcServer._find_datacenter(clc, module)

        # These two fail_json on invalid input rather than returning values.
        ClcServer._validate_types(module)
        ClcServer._validate_name(module)

        params['alias'] = ClcServer._find_alias(clc, module)
        params['cpu'] = ClcServer._find_cpu(clc, module)
        params['memory'] = ClcServer._find_memory(clc, module)
        params['description'] = ClcServer._find_description(module)
        params['ttl'] = ClcServer._find_ttl(clc, module)
        params['template'] = ClcServer._find_template_id(module, datacenter)
        # _find_group returns a Group object; only its id is kept in params.
        params['group'] = ClcServer._find_group(module, datacenter).id
        params['network_id'] = ClcServer._find_network_id(module, datacenter)
        params['anti_affinity_policy_id'] = ClcServer._find_aa_policy_id(
            clc,
            module)
        params['alert_policy_id'] = ClcServer._find_alert_policy_id(
            clc,
            module)

        return params
+
+ @staticmethod
+ def _find_datacenter(clc, module):
+ """
+ Find the datacenter by calling the CLC API.
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: clc-sdk.Datacenter instance
+ """
+ location = module.params.get('location')
+ try:
+ if not location:
+ account = clc.v2.Account()
+ location = account.data.get('primaryDataCenter')
+ data_center = clc.v2.Datacenter(location)
+ return data_center
+ except CLCException as ex:
+ module.fail_json(
+ msg=str(
+ "Unable to find location: {0}".format(location)))
+
+ @staticmethod
+ def _find_alias(clc, module):
+ """
+ Find or Validate the Account Alias by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: clc-sdk.Account instance
+ """
+ alias = module.params.get('alias')
+ if not alias:
+ try:
+ alias = clc.v2.Account.GetAlias()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to find account alias. {0}'.format(
+ ex.message
+ ))
+ return alias
+
+ @staticmethod
+ def _find_cpu(clc, module):
+ """
+ Find or validate the CPU value by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: Int value for CPU
+ """
+ cpu = module.params.get('cpu')
+ group_id = module.params.get('group_id')
+ alias = module.params.get('alias')
+ state = module.params.get('state')
+
+ if not cpu and state == 'present':
+ group = clc.v2.Group(id=group_id,
+ alias=alias)
+ if group.Defaults("cpu"):
+ cpu = group.Defaults("cpu")
+ else:
+ module.fail_json(
+ msg=str("Can\'t determine a default cpu value. Please provide a value for cpu."))
+ return cpu
+
+ @staticmethod
+ def _find_memory(clc, module):
+ """
+ Find or validate the Memory value by calling the CLC API
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: Int value for Memory
+ """
+ memory = module.params.get('memory')
+ group_id = module.params.get('group_id')
+ alias = module.params.get('alias')
+ state = module.params.get('state')
+
+ if not memory and state == 'present':
+ group = clc.v2.Group(id=group_id,
+ alias=alias)
+ if group.Defaults("memory"):
+ memory = group.Defaults("memory")
+ else:
+ module.fail_json(msg=str(
+ "Can\'t determine a default memory value. Please provide a value for memory."))
+ return memory
+
+ @staticmethod
+ def _find_description(module):
+ """
+ Set the description module param to name if description is blank
+ :param module: the module to validate
+ :return: string description
+ """
+ description = module.params.get('description')
+ if not description:
+ description = module.params.get('name')
+ return description
+
+ @staticmethod
+ def _validate_types(module):
+ """
+ Validate that type and storage_type are set appropriately, and fail if not
+ :param module: the module to validate
+ :return: none
+ """
+ state = module.params.get('state')
+ # Normalize to lowercase so the comparisons below are case-insensitive;
+ # missing params stay None and simply skip both checks.
+ server_type = module.params.get(
+ 'type').lower() if module.params.get('type') else None
+ storage_type = module.params.get(
+ 'storage_type').lower() if module.params.get('storage_type') else None
+
+ # Valid combinations: standard -> standard/premium storage,
+ # hyperscale -> hyperscale storage. Only enforced when creating.
+ if state == "present":
+ if server_type == "standard" and storage_type not in (
+ "standard", "premium"):
+ module.fail_json(
+ msg=str("Standard VMs must have storage_type = 'standard' or 'premium'"))
+
+ if server_type == "hyperscale" and storage_type != "hyperscale":
+ module.fail_json(
+ msg=str("Hyperscale VMs must have storage_type = 'hyperscale'"))
+
+ @staticmethod
+ def _validate_name(module):
+ """
+ Validate that name is the correct length if provided, fail if it's not
+ :param module: the module to validate
+ :return: none
+ """
+ server_name = module.params.get('name')
+ state = module.params.get('state')
+
+ if state == 'present' and (
+ len(server_name) < 1 or len(server_name) > 6):
+ module.fail_json(msg=str(
+ "When state = 'present', name must be a string with a minimum length of 1 and a maximum length of 6"))
+
+ @staticmethod
+ def _find_ttl(clc, module):
+ """
+ Validate that TTL is > 3600 if set, and fail if not
+ :param clc: clc-sdk instance to use
+ :param module: module to validate
+ :return: validated ttl
+ """
+ ttl = module.params.get('ttl')
+
+ if ttl:
+ if ttl <= 3600:
+ return module.fail_json(msg=str("Ttl cannot be <= 3600"))
+ else:
+ # Convert relative seconds into the absolute Zulu timestamp
+ # string that the CLC API expects for server expiry.
+ ttl = clc.v2.time_utils.SecondsToZuluTS(int(time.time()) + ttl)
+ return ttl
+
+ @staticmethod
+ def _find_template_id(module, datacenter):
+ """
+ Find the template id by calling the CLC API.
+ :param module: the module to validate
+ :param datacenter: the datacenter to search for the template
+ :return: a valid clc template id
+ """
+ lookup_template = module.params.get('template')
+ state = module.params.get('state')
+ # NOTE: shadows the 'type' builtin; holds the server type param.
+ type = module.params.get('type')
+ result = None
+
+ # Bare-metal servers use configuration_id/os_type instead of a
+ # template, so the template lookup is skipped for them.
+ if state == 'present' and type != 'bareMetal':
+ try:
+ result = datacenter.Templates().Search(lookup_template)[0].id
+ except CLCException:
+ module.fail_json(
+ msg=str(
+ "Unable to find a template: " +
+ lookup_template +
+ " in location: " +
+ datacenter.id))
+ return result
+
+ @staticmethod
+ def _find_network_id(module, datacenter):
+ """
+ Validate the provided network id or return a default.
+ :param module: the module to validate
+ :param datacenter: the datacenter to search for a network id
+ :return: a valid network id
+ """
+ network_id = module.params.get('network_id')
+
+ # No network specified: default to the first network in the
+ # datacenter. An explicitly provided id is passed through untouched.
+ if not network_id:
+ try:
+ network_id = datacenter.Networks().networks[0].id
+ # -- added for clc-sdk 2.23 compatibility
+ # datacenter_networks = clc_sdk.v2.Networks(
+ # networks_lst=datacenter._DeploymentCapabilities()['deployableNetworks'])
+ # network_id = datacenter_networks.networks[0].id
+ # -- end
+ except CLCException:
+ module.fail_json(
+ msg=str(
+ "Unable to find a network in location: " +
+ datacenter.id))
+
+ return network_id
+
+ @staticmethod
+ def _find_aa_policy_id(clc, module):
+ """
+ Validate if the anti affinity policy exist for the given name and throw error if not
+ :param clc: the clc-sdk instance
+ :param module: the module to validate
+ :return: aa_policy_id: the anti affinity policy id of the given name.
+ """
+ aa_policy_id = module.params.get('anti_affinity_policy_id')
+ aa_policy_name = module.params.get('anti_affinity_policy_name')
+ # An explicit id wins; the name is only resolved when no id was given.
+ if not aa_policy_id and aa_policy_name:
+ alias = module.params.get('alias')
+ aa_policy_id = ClcServer._get_anti_affinity_policy_id(
+ clc,
+ module,
+ alias,
+ aa_policy_name)
+ if not aa_policy_id:
+ module.fail_json(
+ msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
+ return aa_policy_id
+
+ @staticmethod
+ def _find_alert_policy_id(clc, module):
+ """
+ Validate if the alert policy exist for the given name and throw error if not
+ :param clc: the clc-sdk instance
+ :param module: the module to validate
+ :return: alert_policy_id: the alert policy id of the given name.
+ """
+ alert_policy_id = module.params.get('alert_policy_id')
+ alert_policy_name = module.params.get('alert_policy_name')
+ # Same precedence as _find_aa_policy_id: explicit id short-circuits
+ # the name lookup.
+ if not alert_policy_id and alert_policy_name:
+ alias = module.params.get('alias')
+ alert_policy_id = ClcServer._get_alert_policy_id_by_name(
+ clc=clc,
+ module=module,
+ alias=alias,
+ alert_policy_name=alert_policy_name
+ )
+ if not alert_policy_id:
+ module.fail_json(
+ msg='No alert policy exist with name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ def _create_servers(self, module, clc, override_count=None):
+ """
+ Create New Servers in CLC cloud
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :param override_count: optional server count overriding the 'count' param (used by _enforce_count)
+ :return: a list of dictionaries with server information about the servers that were created
+ """
+ p = module.params
+ request_list = []
+ servers = []
+ server_dict_array = []
+ created_server_ids = []
+ partial_created_servers_ids = []
+
+ add_public_ip = p.get('add_public_ip')
+ public_ip_protocol = p.get('public_ip_protocol')
+ public_ip_ports = p.get('public_ip_ports')
+
+ params = {
+ 'name': p.get('name'),
+ 'template': p.get('template'),
+ 'group_id': p.get('group'),
+ 'network_id': p.get('network_id'),
+ 'cpu': p.get('cpu'),
+ 'memory': p.get('memory'),
+ 'alias': p.get('alias'),
+ 'password': p.get('password'),
+ 'ip_address': p.get('ip_address'),
+ 'storage_type': p.get('storage_type'),
+ 'type': p.get('type'),
+ 'primary_dns': p.get('primary_dns'),
+ 'secondary_dns': p.get('secondary_dns'),
+ 'additional_disks': p.get('additional_disks'),
+ 'custom_fields': p.get('custom_fields'),
+ 'ttl': p.get('ttl'),
+ 'managed_os': p.get('managed_os'),
+ 'description': p.get('description'),
+ 'source_server_password': p.get('source_server_password'),
+ 'cpu_autoscale_policy_id': p.get('cpu_autoscale_policy_id'),
+ 'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
+ 'packages': p.get('packages'),
+ 'configuration_id': p.get('configuration_id'),
+ 'os_type': p.get('os_type')
+ }
+
+ count = override_count if override_count else p.get('count')
+
+ # A zero count is a no-op: report unchanged with empty result lists.
+ changed = False if count == 0 else True
+
+ if not changed:
+ return server_dict_array, created_server_ids, partial_created_servers_ids, changed
+ # In check mode no API calls are made, so 'servers' stays empty and
+ # all of the post-processing below degrades to no-ops.
+ for i in range(0, count):
+ if not module.check_mode:
+ req = self._create_clc_server(clc=clc,
+ module=module,
+ server_params=params)
+ server = req.requests[0].Server()
+ request_list.append(req)
+ servers.append(server)
+
+ self._wait_for_requests(module, request_list)
+ self._refresh_servers(module, servers)
+
+ ip_failed_servers = self._add_public_ip_to_servers(
+ module=module,
+ should_add_public_ip=add_public_ip,
+ servers=servers,
+ public_ip_protocol=public_ip_protocol,
+ public_ip_ports=public_ip_ports)
+ ap_failed_servers = self._add_alert_policy_to_servers(clc=clc,
+ module=module,
+ servers=servers)
+
+ # Servers that provisioned but failed public-ip or alert-policy setup
+ # are reported separately as "partially created".
+ for server in servers:
+ if server in ip_failed_servers or server in ap_failed_servers:
+ partial_created_servers_ids.append(server.id)
+ else:
+ # reload server details
+ server = clc.v2.Server(server.id)
+ server.data['ipaddress'] = server.details[
+ 'ipAddresses'][0]['internal']
+
+ if add_public_ip and len(server.PublicIPs().public_ips) > 0:
+ server.data['publicip'] = str(
+ server.PublicIPs().public_ips[0])
+ created_server_ids.append(server.id)
+ server_dict_array.append(server.data)
+
+ return server_dict_array, created_server_ids, partial_created_servers_ids, changed
+
+ def _enforce_count(self, module, clc):
+ """
+ Enforce that there is the right number of servers in the provided group.
+ Starts or stops servers as necessary.
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :return: a list of dictionaries with server information about the servers that were created or deleted
+ """
+ p = module.params
+ changed = False
+ count_group = p.get('count_group')
+ datacenter = ClcServer._find_datacenter(clc, module)
+ exact_count = p.get('exact_count')
+ server_dict_array = []
+ partial_servers_ids = []
+ changed_server_ids = []
+
+ # fail here if the exact count was specified without filtering
+ # on a group, as this may lead to a undesired removal of instances
+ if exact_count and count_group is None:
+ return module.fail_json(
+ msg="you must use the 'count_group' option with exact_count")
+
+ servers, running_servers = ClcServer._find_running_servers_by_group(
+ module, datacenter, count_group)
+
+ # Only running servers count towards exact_count; stopped servers in
+ # the group are ignored by the comparison.
+ if len(running_servers) == exact_count:
+ changed = False
+
+ elif len(running_servers) < exact_count:
+ to_create = exact_count - len(running_servers)
+ server_dict_array, changed_server_ids, partial_servers_ids, changed \
+ = self._create_servers(module, clc, override_count=to_create)
+
+ for server in server_dict_array:
+ running_servers.append(server)
+
+ elif len(running_servers) > exact_count:
+ to_remove = len(running_servers) - exact_count
+ # Deterministic removal: sort by id and trim from the front.
+ all_server_ids = sorted([x.id for x in running_servers])
+ remove_ids = all_server_ids[0:to_remove]
+
+ (changed, server_dict_array, changed_server_ids) \
+ = ClcServer._delete_servers(module, clc, remove_ids)
+
+ return server_dict_array, changed_server_ids, partial_servers_ids, changed
+
+ @staticmethod
+ def _wait_for_requests(module, request_list):
+ """
+ Block until server provisioning requests are completed.
+ :param module: the AnsibleModule object
+ :param request_list: a list of clc-sdk.Request instances
+ :return: none
+ """
+ wait = module.params.get('wait')
+ # When wait=False the requests are left running and never checked.
+ if wait:
+ # Requests.WaitUntilComplete() returns the count of failed requests
+ failed_requests_count = sum(
+ [request.WaitUntilComplete() for request in request_list])
+
+ if failed_requests_count > 0:
+ module.fail_json(
+ msg='Unable to process server request')
+
+ @staticmethod
+ def _refresh_servers(module, servers):
+ """
+ Loop through a list of servers and refresh them.
+ :param module: the AnsibleModule object
+ :param servers: list of clc-sdk.Server instances to refresh
+ :return: none
+ """
+ # Refresh() re-reads server state from the API; fail fast on the
+ # first server that cannot be refreshed.
+ for server in servers:
+ try:
+ server.Refresh()
+ except CLCException as ex:
+ module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
+ server.id, ex.message
+ ))
+
+ @staticmethod
+ def _add_public_ip_to_servers(
+ module,
+ should_add_public_ip,
+ servers,
+ public_ip_protocol,
+ public_ip_ports):
+ """
+ Create a public IP for servers
+ :param module: the AnsibleModule object
+ :param should_add_public_ip: boolean - whether or not to provision a public ip for servers. Skipped if False
+ :param servers: List of servers to add public ips to
+ :param public_ip_protocol: a protocol to allow for the public ips
+ :param public_ip_ports: list of ports to allow for the public ips
+ :return: none
+ """
+ failed_servers = []
+ if not should_add_public_ip:
+ return failed_servers
+
+ ports_lst = []
+ request_list = []
+ server = None
+
+ # Every port shares the single configured protocol.
+ for port in public_ip_ports:
+ ports_lst.append(
+ {'protocol': public_ip_protocol, 'port': port})
+ # NOTE(review): the try wraps the whole server loop, so the first
+ # APIFailedResponse aborts it -- only the failing server is recorded
+ # and the remaining servers are never attempted.
+ try:
+ if not module.check_mode:
+ for server in servers:
+ request = server.PublicIPs().Add(ports_lst)
+ request_list.append(request)
+ except APIFailedResponse:
+ failed_servers.append(server)
+ ClcServer._wait_for_requests(module, request_list)
+ return failed_servers
+
+ @staticmethod
+ def _add_alert_policy_to_servers(clc, module, servers):
+ """
+ Associate the alert policy to servers
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param servers: List of servers to add alert policy to
+ :return: failed_servers: the list of servers which failed while associating alert policy
+ """
+ failed_servers = []
+ p = module.params
+ alert_policy_id = p.get('alert_policy_id')
+ alias = p.get('alias')
+
+ # Per-server try/except: a failure on one server is recorded and the
+ # loop continues with the rest (unlike the public-ip helper).
+ if alert_policy_id and not module.check_mode:
+ for server in servers:
+ try:
+ ClcServer._add_alert_policy_to_server(
+ clc=clc,
+ alias=alias,
+ server_id=server.id,
+ alert_policy_id=alert_policy_id)
+ except CLCException:
+ failed_servers.append(server)
+ return failed_servers
+
+ @staticmethod
+ def _add_alert_policy_to_server(
+ clc, alias, server_id, alert_policy_id):
+ """
+ Associate an alert policy to a clc server
+ :param clc: the clc-sdk instance to use
+ :param alias: the clc account alias
+ :param server_id: The clc server id
+ :param alert_policy_id: the alert policy id to be associated to the server
+ :return: none
+ :raises CLCException: when the underlying API call fails
+ """
+ # Translate the low-level APIFailedResponse into the CLCException the
+ # callers of this helper expect to catch.
+ try:
+ clc.v2.API.Call(
+ method='POST',
+ url='servers/%s/%s/alertPolicies' % (alias, server_id),
+ payload=json.dumps(
+ {
+ 'id': alert_policy_id
+ }))
+ except APIFailedResponse as e:
+ raise CLCException(
+ 'Failed to associate alert policy to the server : {0} with Error {1}'.format(
+ server_id, str(e.response_text)))
+
+ @staticmethod
+ def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
+ """
+ Returns the alert policy id for the given alert policy name
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the clc account alias
+ :param alert_policy_name: the name of the alert policy
+ :return: alert_policy_id: the alert policy id, or None when no policy matches
+ """
+ alert_policy_id = None
+ policies = clc.v2.API.Call('GET', '/v2/alertPolicies/%s' % alias)
+ if not policies:
+ return alert_policy_id
+ # Names are not unique in CLC; an ambiguous name is a hard failure
+ # rather than a silent first-match.
+ for policy in policies.get('items'):
+ if policy.get('name') == alert_policy_name:
+ if not alert_policy_id:
+ alert_policy_id = policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
+ return alert_policy_id
+
+ @staticmethod
+ def _delete_servers(module, clc, server_ids):
+ """
+ Delete the servers on the provided list
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :param server_ids: list of servers to delete
+ :return: (changed, server_dict_array, terminated_server_ids) tuple
+ """
+ terminated_server_ids = []
+ # NOTE(review): server_dict_array is always returned empty, and
+ # changed is unconditionally True below -- even in check mode the ids
+ # are reported as terminated.
+ server_dict_array = []
+ request_list = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = clc.v2.Servers(server_ids).Servers()
+ for server in servers:
+ if not module.check_mode:
+ request_list.append(server.Delete())
+ ClcServer._wait_for_requests(module, request_list)
+
+ for server in servers:
+ terminated_server_ids.append(server.id)
+
+ return True, server_dict_array, terminated_server_ids
+
+ @staticmethod
+ def _start_stop_servers(module, clc, server_ids):
+ """
+ Start or Stop the servers on the provided list
+ :param module: the AnsibleModule object
+ :param clc: the clc-sdk instance to use
+ :param server_ids: list of servers to start or stop
+ :return: a list of dictionaries with server information about the servers that were started or stopped
+ """
+ p = module.params
+ state = p.get('state')
+ changed = False
+ changed_servers = []
+ server_dict_array = []
+ result_server_ids = []
+ request_list = []
+
+ if not isinstance(server_ids, list) or len(server_ids) < 1:
+ return module.fail_json(
+ msg='server_ids should be a list of servers, aborting')
+
+ servers = clc.v2.Servers(server_ids).Servers()
+ # Only servers whose powerState differs from the requested state are
+ # touched; matching servers are still included in the result below.
+ for server in servers:
+ if server.powerState != state:
+ changed_servers.append(server)
+ if not module.check_mode:
+ request_list.append(
+ ClcServer._change_server_power_state(
+ module,
+ server,
+ state))
+ changed = True
+
+ ClcServer._wait_for_requests(module, request_list)
+ ClcServer._refresh_servers(module, changed_servers)
+
+ # set() dedups by object identity; changed_servers holds the same
+ # Server objects as servers, so each server is visited exactly once.
+ for server in set(changed_servers + servers):
+ try:
+ server.data['ipaddress'] = server.details[
+ 'ipAddresses'][0]['internal']
+ server.data['publicip'] = str(
+ server.PublicIPs().public_ips[0])
+ except (KeyError, IndexError):
+ # Server has no internal address / public ip; leave data as-is.
+ pass
+
+ server_dict_array.append(server.data)
+ result_server_ids.append(server.id)
+
+ return changed, server_dict_array, result_server_ids
+
+ @staticmethod
+ def _change_server_power_state(module, server, state):
+ """
+ Change the server powerState
+ :param module: the module to check for intended state
+ :param server: the server to start or stop
+ :param state: the intended powerState for the server ('started' starts; anything else stops)
+ :return: the request object from clc-sdk call
+ """
+ result = None
+ try:
+ if state == 'started':
+ result = server.PowerOn()
+ else:
+ # Try to shut down the server and fall back to power off when unable to shut down.
+ result = server.ShutDown()
+ if result and hasattr(result, 'requests') and result.requests[0]:
+ return result
+ else:
+ result = server.PowerOff()
+ except CLCException:
+ module.fail_json(
+ msg='Unable to change power state for server {0}'.format(
+ server.id))
+ return result
+
+ @staticmethod
+ def _find_running_servers_by_group(module, datacenter, count_group):
+ """
+ Find a list of running servers in the provided group
+ :param module: the AnsibleModule object
+ :param datacenter: the clc-sdk.Datacenter instance to use to lookup the group
+ :param count_group: the group to count the servers
+ :return: list of servers, and list of running servers
+ """
+ group = ClcServer._find_group(
+ module=module,
+ datacenter=datacenter,
+ lookup_group=count_group)
+
+ servers = group.Servers().Servers()
+ running_servers = []
+
+ # "Running" means both provisioned (status active) and powered on.
+ for server in servers:
+ if server.status == 'active' and server.powerState == 'started':
+ running_servers.append(server)
+
+ return servers, running_servers
+
+ @staticmethod
+ def _find_group(module, datacenter, lookup_group=None):
+ """
+ Find a server group in a datacenter by calling the CLC API
+ :param module: the AnsibleModule instance
+ :param datacenter: clc-sdk.Datacenter instance to search for the group
+ :param lookup_group: string name of the group to search for; defaults to the 'group' module param
+ :return: clc-sdk.Group instance
+ """
+ if not lookup_group:
+ lookup_group = module.params.get('group')
+ try:
+ return datacenter.Groups().Get(lookup_group)
+ except CLCException:
+ pass
+
+ # The search above only acts on the main (top-level) groups; fall
+ # back to a recursive search of all subgroups.
+ result = ClcServer._find_group_recursive(
+ module,
+ datacenter.Groups(),
+ lookup_group)
+
+ if result is None:
+ module.fail_json(
+ msg=str(
+ "Unable to find group: " +
+ lookup_group +
+ " in location: " +
+ datacenter.id))
+
+ return result
+
+ @staticmethod
+ def _find_group_recursive(module, group_list, lookup_group):
+ """
+ Find a server group by recursively walking the tree
+ :param module: the AnsibleModule instance to use
+ :param group_list: a list of groups to search
+ :param lookup_group: the group to look for
+ :return: the matching clc-sdk.Group, or None when not found
+ """
+ result = None
+ # Depth-first: try each group's direct subgroups, then recurse into
+ # them; the first match anywhere in the tree wins.
+ for group in group_list.groups:
+ subgroups = group.Subgroups()
+ try:
+ return subgroups.Get(lookup_group)
+ except CLCException:
+ result = ClcServer._find_group_recursive(
+ module,
+ subgroups,
+ lookup_group)
+
+ if result is not None:
+ break
+
+ return result
+
+ @staticmethod
+ def _create_clc_server(
+ clc,
+ module,
+ server_params):
+ """
+ Call the CLC Rest API to Create a Server
+ :param clc: the clc-python-sdk instance to use
+ :param module: the AnsibleModule instance to use
+ :param server_params: a dictionary of params to use to create the servers
+ :return: clc-sdk.Request object linked to the queued server request
+ """
+
+ # Raw API call instead of clc.v2.Server.Create so every supported
+ # option (packages, configuration_id, os_type, ...) can be passed.
+ try:
+ res = clc.v2.API.Call(
+ method='POST',
+ url='servers/%s' %
+ (server_params.get('alias')),
+ payload=json.dumps(
+ {
+ 'name': server_params.get('name'),
+ 'description': server_params.get('description'),
+ 'groupId': server_params.get('group_id'),
+ 'sourceServerId': server_params.get('template'),
+ 'isManagedOS': server_params.get('managed_os'),
+ 'primaryDNS': server_params.get('primary_dns'),
+ 'secondaryDNS': server_params.get('secondary_dns'),
+ 'networkId': server_params.get('network_id'),
+ 'ipAddress': server_params.get('ip_address'),
+ 'password': server_params.get('password'),
+ 'sourceServerPassword': server_params.get('source_server_password'),
+ 'cpu': server_params.get('cpu'),
+ 'cpuAutoscalePolicyId': server_params.get('cpu_autoscale_policy_id'),
+ 'memoryGB': server_params.get('memory'),
+ 'type': server_params.get('type'),
+ 'storageType': server_params.get('storage_type'),
+ 'antiAffinityPolicyId': server_params.get('anti_affinity_policy_id'),
+ 'customFields': server_params.get('custom_fields'),
+ 'additionalDisks': server_params.get('additional_disks'),
+ 'ttl': server_params.get('ttl'),
+ 'packages': server_params.get('packages'),
+ 'configurationId': server_params.get('configuration_id'),
+ 'osType': server_params.get('os_type')}))
+
+ result = clc.v2.Requests(res)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to create the server: {0}. {1}'.format(
+ server_params.get('name'),
+ ex.response_text
+ ))
+
+ #
+ # Patch the Request object so that it returns a valid server
+
+ # Find the server's UUID from the API response
+ server_uuid = [obj['id']
+ for obj in res['links'] if obj['rel'] == 'self'][0]
+
+ # Change the request server method to a _find_server_by_uuid closure so
+ # that it will work
+ result.requests[0].Server = lambda: ClcServer._find_server_by_uuid_w_retry(
+ clc,
+ module,
+ server_uuid,
+ server_params.get('alias'))
+
+ return result
+
+ @staticmethod
+ def _get_anti_affinity_policy_id(clc, module, alias, aa_policy_name):
+ """
+ retrieves the anti affinity policy id of the server based on the name of the policy
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param alias: the CLC account alias
+ :param aa_policy_name: the anti affinity policy name
+ :return: aa_policy_id: The anti affinity policy id, or None when no policy matches
+ """
+ aa_policy_id = None
+ try:
+ aa_policies = clc.v2.API.Call(method='GET',
+ url='antiAffinityPolicies/%s' % alias)
+ except APIFailedResponse as ex:
+ return module.fail_json(msg='Unable to fetch anti affinity policies for account: {0}. {1}'.format(
+ alias, ex.response_text))
+ # Names are not unique; an ambiguous name is a hard failure rather
+ # than a silent first-match (mirrors _get_alert_policy_id_by_name).
+ for aa_policy in aa_policies.get('items'):
+ if aa_policy.get('name') == aa_policy_name:
+ if not aa_policy_id:
+ aa_policy_id = aa_policy.get('id')
+ else:
+ return module.fail_json(
+ msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
+ return aa_policy_id
+
+ #
+ # This is the function that gets patched to the Request.Server attribute using a lambda closure
+ #
+
+ @staticmethod
+ def _find_server_by_uuid_w_retry(
+ clc, module, svr_uuid, alias=None, retries=5, back_out=2):
+ """
+ Find the clc server by the UUID returned from the provisioning request. Retry the request if a 404 is returned.
+ :param clc: the clc-sdk instance to use
+ :param module: the AnsibleModule object
+ :param svr_uuid: UUID of the server
+ :param retries: the number of retry attempts to make prior to fail. default is 5
+ :param alias: the Account Alias to search
+ :return: a clc-sdk.Server instance
+ """
+ if not alias:
+ alias = clc.v2.Account.GetAlias()
+
+ # Wait and retry if the api returns a 404
+ while True:
+ retries -= 1
+ try:
+ server_obj = clc.v2.API.Call(
+ method='GET', url='servers/%s/%s?uuid=true' %
+ (alias, svr_uuid))
+ server_id = server_obj['id']
+ server = clc.v2.Server(
+ id=server_id,
+ alias=alias,
+ server_obj=server_obj)
+ return server
+
+ except APIFailedResponse as e:
+ if e.response_status_code != 404:
+ return module.fail_json(
+ msg='A failure response was received from CLC API when '
+ 'attempting to get details for a server: UUID=%s, Code=%i, Message=%s' %
+ (svr_uuid, e.response_status_code, e.message))
+ if retries == 0:
+ return module.fail_json(
+ msg='Unable to reach the CLC API after 5 attempts')
+ sleep(back_out)
+ back_out *= 2
+
+ @staticmethod
+ def _set_user_agent(clc):
+ # Tag outgoing SDK requests with this module's identity so CLC can
+ # attribute API traffic. Older SDKs lack SetRequestsSession, hence
+ # the hasattr guard.
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
+def main():
+ """
+ The main function. Instantiates the module and calls process_request.
+ :return: none
+ """
+ argument_dict = ClcServer._define_module_argument_spec()
+ module = AnsibleModule(supports_check_mode=True, **argument_dict)
+ clc_server = ClcServer(module)
+ clc_server.process_request()
+
+from ansible.module_utils.basic import * # pylint: disable=W0614
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/centurylink/clc_server_snapshot.py b/lib/ansible/modules/cloud/centurylink/clc_server_snapshot.py
new file mode 100644
index 0000000000..e176f2d779
--- /dev/null
+++ b/lib/ansible/modules/cloud/centurylink/clc_server_snapshot.py
@@ -0,0 +1,417 @@
+#!/usr/bin/python
+
+#
+# Copyright (c) 2015 CenturyLink
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: clc_server_snapshot
+short_description: Create, Delete and Restore server snapshots in CenturyLink Cloud.
+description:
+ - An Ansible module to Create, Delete and Restore server snapshots in CenturyLink Cloud.
+version_added: "2.0"
+options:
+ server_ids:
+ description:
+ - The list of CLC server Ids.
+ required: True
+ expiration_days:
+ description:
+ - The number of days to keep the server snapshot before it expires.
+ default: 7
+ required: False
+ state:
+ description:
+ - The state to insure that the provided resources are in.
+ default: 'present'
+ required: False
+ choices: ['present', 'absent', 'restore']
+ wait:
+ description:
+ - Whether to wait for the provisioning tasks to finish before returning.
+ default: True
+ required: False
+ choices: [True, False]
+requirements:
+ - python = 2.7
+ - requests >= 2.5.0
+ - clc-sdk
+author: "CLC Runner (@clc-runner)"
+notes:
+ - To use this module, it is required to set the below environment variables which enables access to the
+ Centurylink Cloud
+ - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
+ - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
+ - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
+ CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
+ - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
+ - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
+ - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
+'''
+
+EXAMPLES = '''
+# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
+
+- name: Create server snapshot
+ clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ expiration_days: 10
+ wait: True
+ state: present
+
+- name: Restore server snapshot
+ clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ wait: True
+ state: restore
+
+- name: Delete server snapshot
+ clc_server_snapshot:
+ server_ids:
+ - UC1TEST-SVR01
+ - UC1TEST-SVR02
+ wait: True
+ state: absent
+'''
+
+RETURN = '''
+server_ids:
+ description: The list of server ids that are changed
+ returned: success
+ type: list
+ sample:
+ [
+ "UC1TEST-SVR01",
+ "UC1TEST-SVR02"
+ ]
+'''
+
+__version__ = '${version}'
+
+from distutils.version import LooseVersion
+
+try:
+ import requests
+except ImportError:
+ REQUESTS_FOUND = False
+else:
+ REQUESTS_FOUND = True
+
+#
+# Requires the clc-python-sdk.
+# sudo pip install clc-sdk
+#
+try:
+ import clc as clc_sdk
+ from clc import CLCException
+except ImportError:
+ CLC_FOUND = False
+ clc_sdk = None
+else:
+ CLC_FOUND = True
+
+
+class ClcSnapshot:
+
+ clc = clc_sdk
+ module = None
+
+ def __init__(self, module):
+ """
+ Construct module
+ :param module: the AnsibleModule instance driving this run
+ """
+ self.module = module
+
+ # Fail early when the import guards at module load time found the
+ # dependencies missing or too old.
+ if not CLC_FOUND:
+ self.module.fail_json(
+ msg='clc-python-sdk required for this module')
+ if not REQUESTS_FOUND:
+ self.module.fail_json(
+ msg='requests library is required for this module')
+ if requests.__version__ and LooseVersion(
+ requests.__version__) < LooseVersion('2.5.0'):
+ self.module.fail_json(
+ msg='requests library version should be >= 2.5.0')
+
+ self._set_user_agent(self.clc)
+
+ def process_request(self):
+ """
+ Process the request - Main Code Path
+ :return: Returns with either an exit_json or fail_json
+ """
+ p = self.module.params
+ server_ids = p['server_ids']
+ expiration_days = p['expiration_days']
+ state = p['state']
+ request_list = []
+ changed = False
+ changed_servers = []
+
+ self._set_clc_credentials_from_env()
+ # Dispatch on the requested state: create, delete, or restore
+ # snapshots for the given servers.
+ if state == 'present':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_present(
+ server_ids=server_ids,
+ expiration_days=expiration_days)
+ elif state == 'absent':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_absent(
+ server_ids=server_ids)
+ elif state == 'restore':
+ changed, request_list, changed_servers = self.ensure_server_snapshot_restore(
+ server_ids=server_ids)
+
+ self._wait_for_requests_to_complete(request_list)
+ return self.module.exit_json(
+ changed=changed,
+ server_ids=changed_servers)
+
+ def ensure_server_snapshot_present(self, server_ids, expiration_days):
+ """
+ Ensures the given set of server_ids have the snapshots created
+ :param server_ids: The list of server_ids to create the snapshot
+ :param expiration_days: The number of days to keep the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+ changed_servers: The list of servers ids that are modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ # Idempotency: only servers with no existing snapshot are changed.
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) == 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._create_server_snapshot(server, expiration_days)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _create_server_snapshot(self, server, expiration_days):
+ """
+ Create the snapshot for the CLC server
+ :param server: the CLC server object
+ :param expiration_days: The number of days to keep the snapshot
+ :return: the create request object from CLC API Call
+ """
+ result = None
+ try:
+ # delete_existing=True replaces any snapshot created between the
+ # idempotency check and this call.
+ result = server.CreateSnapshot(
+ delete_existing=True,
+ expiration_days=expiration_days)
+ except CLCException as ex:
+ # NOTE(review): relies on CLCException carrying response_text --
+ # confirm against the installed clc-sdk version.
+ self.module.fail_json(msg='Failed to create snapshot for server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_server_snapshot_absent(self, server_ids):
+ """
+ Ensures the given set of server_ids have the snapshots removed
+ :param server_ids: The list of server_ids to delete the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+ changed_servers: The list of servers ids that are modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ # Idempotency: only servers that actually have a snapshot are changed.
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) > 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._delete_server_snapshot(server)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _delete_server_snapshot(self, server):
+ """
+ Delete snapshot for the CLC server
+ :param server: the CLC server object
+ :return: the delete snapshot request object from CLC API
+ """
+ result = None
+ try:
+ result = server.DeleteSnapshot()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to delete snapshot for server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def ensure_server_snapshot_restore(self, server_ids):
+ """
+ Ensures the given set of server_ids have the snapshots restored
+ :param server_ids: The list of server_ids to delete the snapshot
+ :return: (changed, request_list, changed_servers)
+ changed: A flag indicating whether any change was made
+ request_list: the list of clc request objects from CLC API call
+ changed_servers: The list of servers ids that are modified
+ """
+ request_list = []
+ changed = False
+ servers = self._get_servers_from_clc(
+ server_ids,
+ 'Failed to obtain server list from the CLC API')
+ servers_to_change = [
+ server for server in servers if len(
+ server.GetSnapshots()) > 0]
+ for server in servers_to_change:
+ changed = True
+ if not self.module.check_mode:
+ request = self._restore_server_snapshot(server)
+ request_list.append(request)
+ changed_servers = [
+ server.id for server in servers_to_change if server.id]
+ return changed, request_list, changed_servers
+
+ def _restore_server_snapshot(self, server):
+ """
+ Restore snapshot for the CLC server
+ :param server: the CLC server object
+ :return: the restore snapshot request object from CLC API
+ """
+ result = None
+ try:
+ result = server.RestoreSnapshot()
+ except CLCException as ex:
+ self.module.fail_json(msg='Failed to restore snapshot for server : {0}. {1}'.format(
+ server.id, ex.response_text
+ ))
+ return result
+
+ def _wait_for_requests_to_complete(self, requests_lst):
+ """
+ Waits until the CLC requests are complete if the wait argument is True
+ :param requests_lst: The list of CLC request objects
+ :return: none
+ """
+ if not self.module.params['wait']:
+ return
+ for request in requests_lst:
+ request.WaitUntilComplete()
+ for request_details in request.requests:
+ if request_details.Status() != 'succeeded':
+ self.module.fail_json(
+ msg='Unable to process server snapshot request')
+
+ @staticmethod
+ def define_argument_spec():
+ """
+ This function defines the dictionary object required for
+ package module
+ :return: the package dictionary object
+ """
+ argument_spec = dict(
+ server_ids=dict(type='list', required=True),
+ expiration_days=dict(default=7),
+ wait=dict(default=True),
+ state=dict(
+ default='present',
+ choices=[
+ 'present',
+ 'absent',
+ 'restore']),
+ )
+ return argument_spec
+
+ def _get_servers_from_clc(self, server_list, message):
+ """
+ Internal function to fetch list of CLC server objects from a list of server ids
+ :param server_list: The list of server ids
+ :param message: The error message to throw in case of any error
+ :return the list of CLC server objects
+ """
+ try:
+ return self.clc.v2.Servers(server_list).servers
+ except CLCException as ex:
+ return self.module.fail_json(msg=message + ': %s' % ex)
+
+ def _set_clc_credentials_from_env(self):
+ """
+ Set the CLC Credentials on the sdk by reading environment variables
+ :return: none
+ """
+ env = os.environ
+ v2_api_token = env.get('CLC_V2_API_TOKEN', False)
+ v2_api_username = env.get('CLC_V2_API_USERNAME', False)
+ v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
+ clc_alias = env.get('CLC_ACCT_ALIAS', False)
+ api_url = env.get('CLC_V2_API_URL', False)
+
+ if api_url:
+ self.clc.defaults.ENDPOINT_URL_V2 = api_url
+
+ if v2_api_token and clc_alias:
+ self.clc._LOGIN_TOKEN_V2 = v2_api_token
+ self.clc._V2_ENABLED = True
+ self.clc.ALIAS = clc_alias
+ elif v2_api_username and v2_api_passwd:
+ self.clc.v2.SetCredentials(
+ api_username=v2_api_username,
+ api_passwd=v2_api_passwd)
+ else:
+ return self.module.fail_json(
+ msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
+ "environment variables")
+
+ @staticmethod
+ def _set_user_agent(clc):
+ if hasattr(clc, 'SetRequestsSession'):
+ agent_string = "ClcAnsibleModule/" + __version__
+ ses = requests.Session()
+ ses.headers.update({"Api-Client": agent_string})
+ ses.headers['User-Agent'] += " " + agent_string
+ clc.SetRequestsSession(ses)
+
+
def main():
    """
    Module entry point: build the AnsibleModule and process the request.
    :return: None
    """
    module = AnsibleModule(
        argument_spec=ClcSnapshot.define_argument_spec(),
        supports_check_mode=True
    )
    snapshot_manager = ClcSnapshot(module)
    snapshot_manager.process_request()
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/__init__.py b/lib/ansible/modules/cloud/cloudstack/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/__init__.py
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_account.py b/lib/ansible/modules/cloud/cloudstack/cs_account.py
new file mode 100644
index 0000000000..0074ad29ca
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_account.py
@@ -0,0 +1,385 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_account
+short_description: Manages accounts on Apache CloudStack based clouds.
+description:
+ - Create, disable, lock, enable and remove accounts.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of account.
+ required: true
+ username:
+ description:
+ - Username of the user to be created if account did not exist.
+ - Required on C(state=present).
+ required: false
+ default: null
+ password:
+ description:
+ - Password of the user to be created if account did not exist.
+ - Required on C(state=present).
+ required: false
+ default: null
+ first_name:
+ description:
+ - First name of the user to be created if account did not exist.
+ - Required on C(state=present).
+ required: false
+ default: null
+ last_name:
+ description:
+ - Last name of the user to be created if account did not exist.
+ - Required on C(state=present).
+ required: false
+ default: null
+ email:
+ description:
+ - Email of the user to be created if account did not exist.
+ - Required on C(state=present).
+ required: false
+ default: null
+ timezone:
+ description:
+ - Timezone of the user to be created if account did not exist.
+ required: false
+ default: null
+ network_domain:
+ description:
+ - Network domain of the account.
+ required: false
+ default: null
+ account_type:
+ description:
+ - Type of the account.
+ required: false
+ default: 'user'
+ choices: [ 'user', 'root_admin', 'domain_admin' ]
+ domain:
+ description:
+ - Domain the account is related to.
+ required: false
+ default: 'ROOT'
+ state:
+ description:
+ - State of the account.
+ - C(unlocked) is an alias for C(enabled).
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked' ]
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# create an account in domain 'CUSTOMERS'
+local_action:
+ module: cs_account
+ name: customer_xy
+ username: customer_xy
+ password: S3Cur3
+ last_name: Doe
+ first_name: John
+ email: john.doe@example.com
+ domain: CUSTOMERS
+
+# Lock an existing account in domain 'CUSTOMERS'
+local_action:
+ module: cs_account
+ name: customer_xy
+ domain: CUSTOMERS
+ state: locked
+
+# Disable an existing account in domain 'CUSTOMERS'
+local_action:
+ module: cs_account
+ name: customer_xy
+ domain: CUSTOMERS
+ state: disabled
+
+# Enable an existing account in domain 'CUSTOMERS'
+local_action:
+ module: cs_account
+ name: customer_xy
+ domain: CUSTOMERS
+ state: enabled
+
+# Remove an account in domain 'CUSTOMERS'
+local_action:
+ module: cs_account
+ name: customer_xy
+ domain: CUSTOMERS
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the account.
+ returned: success
+ type: string
+ sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
+name:
+ description: Name of the account.
+ returned: success
+ type: string
+ sample: linus@example.com
+account_type:
+ description: Type of the account.
+ returned: success
+ type: string
+ sample: user
+state:
+ description: State of the account.
+ returned: success
+ type: string
+ sample: enabled
+network_domain:
+ description: Network domain of the account.
+ returned: success
+ type: string
+ sample: example.local
+domain:
+  description: Domain the account is related to.
+ returned: success
+ type: string
+ sample: ROOT
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
class AnsibleCloudStackAccount(AnsibleCloudStack):
    """Implements the cs_account module: create, enable, disable, lock and
    remove CloudStack accounts.

    Relies on helpers inherited from AnsibleCloudStack (self.cs API client,
    self.module, self.result, get_domain(), poll_job(), fail_on_missing_params)
    defined in ansible.module_utils.cloudstack, not in this file.
    """

    def __init__(self, module):
        super(AnsibleCloudStackAccount, self).__init__(module)
        # Map CloudStack API result keys to the keys this module returns.
        self.returns = {
            'networkdomain': 'network_domain',
        }
        # Cached account dict; populated lazily by get_account().
        self.account = None
        # CloudStack encodes the account type as an integer.
        self.account_types = {
            'user': 0,
            'root_admin': 1,
            'domain_admin': 2,
        }

    def get_account_type(self):
        """Return the numeric CloudStack account type for the module's
        'account_type' parameter."""
        account_type = self.module.params.get('account_type')
        return self.account_types[account_type]

    def get_account(self):
        """Look up the account by name within the requested domain.

        Returns the account dict or None when it does not exist; the result
        is cached on self.account for subsequent calls.
        """
        if not self.account:
            args = {
                'listall': True,
                'domainid': self.get_domain(key='id'),
            }
            accounts = self.cs.listAccounts(**args)
            if accounts:
                account_name = self.module.params.get('name')
                for a in accounts['account']:
                    if account_name == a['name']:
                        self.account = a
                        break

        return self.account

    def enable_account(self):
        """Ensure the account exists and is in state 'enabled'.

        Creates the account first when it is missing; returns the (possibly
        updated) account dict.
        """
        account = self.get_account()
        if not account:
            account = self.present_account()

        if account['state'].lower() != 'enabled':
            self.result['changed'] = True
            args = {
                'id': account['id'],
                'account': self.module.params.get('name'),
                'domainid': self.get_domain(key='id')
            }
            if not self.module.check_mode:
                res = self.cs.enableAccount(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                account = res['account']
        return account

    def lock_account(self):
        """Ensure the account is locked (wrapper around
        lock_or_disable_account)."""
        return self.lock_or_disable_account(lock=True)

    def disable_account(self):
        """Ensure the account is disabled (wrapper around
        lock_or_disable_account)."""
        return self.lock_or_disable_account()

    def lock_or_disable_account(self, lock=False):
        """Put the account into the 'locked' (lock=True) or 'disabled' state.

        CloudStack exposes both operations through disableAccount with a
        'lock' flag.  Creates the account first when it is missing.
        """
        account = self.get_account()
        if not account:
            account = self.present_account()

        # we need to enable the account to lock it.
        if lock and account['state'].lower() == 'disabled':
            account = self.enable_account()

        if (lock and account['state'].lower() != 'locked' or
            not lock and account['state'].lower() != 'disabled'):
            self.result['changed'] = True
            args = {
                'id': account['id'],
                'account': self.module.params.get('name'),
                'domainid': self.get_domain(key='id'),
                'lock': lock,
            }
            if not self.module.check_mode:
                account = self.cs.disableAccount(**args)

                if 'errortext' in account:
                    self.module.fail_json(msg="Failed: '%s'" % account['errortext'])

                # disableAccount is asynchronous; poll_job (base class)
                # waits for the job and returns the final account dict.
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    account = self.poll_job(account, 'account')
        return account

    def present_account(self):
        """Ensure the account exists, creating it (and its initial user)
        when necessary.  Fails when the user-creation parameters are
        missing."""
        required_params = [
            'email',
            'username',
            'password',
            'first_name',
            'last_name',
        ]
        self.module.fail_on_missing_params(required_params=required_params)

        account = self.get_account()

        if not account:
            self.result['changed'] = True

            args = {
                'account': self.module.params.get('name'),
                'domainid': self.get_domain(key='id'),
                'accounttype': self.get_account_type(),
                'networkdomain': self.module.params.get('network_domain'),
                'username': self.module.params.get('username'),
                'password': self.module.params.get('password'),
                'firstname': self.module.params.get('first_name'),
                'lastname': self.module.params.get('last_name'),
                'email': self.module.params.get('email'),
                'timezone': self.module.params.get('timezone')
            }
            if not self.module.check_mode:
                res = self.cs.createAccount(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                account = res['account']
        return account

    def absent_account(self):
        """Ensure the account does not exist.  Returns the previous account
        dict, or None when it was already absent."""
        account = self.get_account()
        if account:
            self.result['changed'] = True

            if not self.module.check_mode:
                res = self.cs.deleteAccount(id=account['id'])

                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])

                # deleteAccount is asynchronous; wait for it when requested.
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    self.poll_job(res, 'account')
        return account

    def get_result(self, account):
        """Build the module result dict; translates the numeric
        'accounttype' back into its symbolic name."""
        super(AnsibleCloudStackAccount, self).get_result(account)
        if account:
            if 'accounttype' in account:
                for key, value in self.account_types.items():
                    if value == account['accounttype']:
                        self.result['account_type'] = key
                        break
        return self.result
+
+
def main():
    """
    Module entry point for cs_account: dispatch on 'state' and exit with
    the resulting account facts.
    :return: None
    """
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked'], default='present'),
        account_type=dict(choices=['user', 'root_admin', 'domain_admin'], default='user'),
        network_domain=dict(default=None),
        domain=dict(default='ROOT'),
        email=dict(default=None),
        first_name=dict(default=None),
        last_name=dict(default=None),
        username=dict(default=None),
        password=dict(default=None, no_log=True),
        timezone=dict(default=None),
        poll_async=dict(type='bool', default=True),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    try:
        account_manager = AnsibleCloudStackAccount(module)

        state = module.params.get('state')
        if state == 'absent':
            account = account_manager.absent_account()
        elif state in ('enabled', 'unlocked'):
            # 'unlocked' is documented as an alias for 'enabled'.
            account = account_manager.enable_account()
        elif state == 'disabled':
            account = account_manager.disable_account()
        elif state == 'locked':
            account = account_manager.lock_account()
        else:
            account = account_manager.present_account()

        result = account_manager.get_result(account)

    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_affinitygroup.py b/lib/ansible/modules/cloud/cloudstack/cs_affinitygroup.py
new file mode 100644
index 0000000000..a9c71c42b0
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_affinitygroup.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_affinitygroup
+short_description: Manages affinity groups on Apache CloudStack based clouds.
+description:
+ - Create and remove affinity groups.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the affinity group.
+ required: true
+ affinty_type:
+ description:
+ - Type of the affinity group. If not specified, first found affinity type is used.
+ required: false
+ default: null
+ description:
+ description:
+ - Description of the affinity group.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the affinity group.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ domain:
+ description:
+ - Domain the affinity group is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the affinity group is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the affinity group is related to.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create an affinity group
+- local_action:
+ module: cs_affinitygroup
+ name: haproxy
+ affinty_type: host anti-affinity
+
+# Remove an affinity group
+- local_action:
+ module: cs_affinitygroup
+ name: haproxy
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the affinity group.
+ returned: success
+ type: string
+ sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
+name:
+ description: Name of affinity group.
+ returned: success
+ type: string
+ sample: app
+description:
+ description: Description of affinity group.
+ returned: success
+ type: string
+ sample: application affinity group
+affinity_type:
+ description: Type of affinity group.
+ returned: success
+ type: string
+ sample: host anti-affinity
+project:
+ description: Name of project the affinity group is related to.
+ returned: success
+ type: string
+ sample: Production
+domain:
+ description: Domain the affinity group is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the affinity group is related to.
+ returned: success
+ type: string
+ sample: example account
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
class AnsibleCloudStackAffinityGroup(AnsibleCloudStack):
    """Implements the cs_affinitygroup module: create and remove CloudStack
    affinity groups.

    Inherits the API client (self.cs), module handling and helpers such as
    get_project()/get_account()/get_domain()/poll_job() from
    AnsibleCloudStack (ansible.module_utils.cloudstack).
    """

    def __init__(self, module):
        super(AnsibleCloudStackAffinityGroup, self).__init__(module)
        # Map CloudStack API result keys to the keys this module returns.
        self.returns = {
            'type': 'affinity_type',
        }
        # Cached group dict; populated lazily by get_affinity_group().
        self.affinity_group = None

    def get_affinity_group(self):
        """Return the affinity group dict matching the 'name' parameter
        (scoped by project/account/domain), or None when it does not
        exist.  The result is cached on self.affinity_group."""
        if not self.affinity_group:

            args = {
                'projectid': self.get_project(key='id'),
                'account': self.get_account(key='name'),
                'domainid': self.get_domain(key='id'),
                'name': self.module.params.get('name'),
            }
            affinity_groups = self.cs.listAffinityGroups(**args)
            if affinity_groups:
                self.affinity_group = affinity_groups['affinitygroup'][0]
        return self.affinity_group

    def get_affinity_type(self):
        """Resolve the affinity group type to use.

        Falls back to the first type the API reports when the 'affinty_type'
        parameter (note: historical spelling of the option) is unset; fails
        the module when an unknown type was requested.
        """
        affinity_type = self.module.params.get('affinty_type')

        affinity_types = self.cs.listAffinityGroupTypes()
        if affinity_types:
            if not affinity_type:
                return affinity_types['affinityGroupType'][0]['type']

            for a in affinity_types['affinityGroupType']:
                if a['type'] == affinity_type:
                    return a['type']
        self.module.fail_json(msg="affinity group type '%s' not found" % affinity_type)

    def create_affinity_group(self):
        """Ensure the affinity group exists; create it when missing and
        return the group dict."""
        affinity_group = self.get_affinity_group()
        if not affinity_group:
            self.result['changed'] = True

            args = {
                'name': self.module.params.get('name'),
                'type': self.get_affinity_type(),
                'description': self.module.params.get('description'),
                'projectid': self.get_project(key='id'),
                'account': self.get_account(key='name'),
                'domainid': self.get_domain(key='id'),
            }
            if not self.module.check_mode:
                res = self.cs.createAffinityGroup(**args)

                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])

                # createAffinityGroup is asynchronous; wait for the job
                # when poll_async is requested.
                poll_async = self.module.params.get('poll_async')
                if res and poll_async:
                    affinity_group = self.poll_job(res, 'affinitygroup')
        return affinity_group

    def remove_affinity_group(self):
        """Ensure the affinity group is absent.  Returns the previous
        group dict, or None when it was already absent."""
        affinity_group = self.get_affinity_group()
        if affinity_group:
            self.result['changed'] = True

            args = {
                'name': self.module.params.get('name'),
                'projectid': self.get_project(key='id'),
                'account': self.get_account(key='name'),
                'domainid': self.get_domain(key='id'),
            }
            if not self.module.check_mode:
                res = self.cs.deleteAffinityGroup(**args)

                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])

                # deleteAffinityGroup is asynchronous as well.
                poll_async = self.module.params.get('poll_async')
                if res and poll_async:
                    self.poll_job(res, 'affinitygroup')
        return affinity_group
+
+
def main():
    """
    Module entry point for cs_affinitygroup: create or remove the group
    depending on 'state' and exit with the resulting facts.
    :return: None
    """
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        affinty_type=dict(default=None),
        description=dict(default=None),
        state=dict(choices=['present', 'absent'], default='present'),
        domain=dict(default=None),
        account=dict(default=None),
        project=dict(default=None),
        poll_async=dict(type='bool', default=True),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    try:
        group_manager = AnsibleCloudStackAffinityGroup(module)

        if module.params.get('state') == 'absent':
            affinity_group = group_manager.remove_affinity_group()
        else:
            affinity_group = group_manager.create_affinity_group()

        result = group_manager.get_result(affinity_group)

    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_cluster.py b/lib/ansible/modules/cloud/cloudstack/cs_cluster.py
new file mode 100644
index 0000000000..7c9d39e614
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_cluster.py
@@ -0,0 +1,421 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_cluster
+short_description: Manages host clusters on Apache CloudStack based clouds.
+description:
+ - Create, update and remove clusters.
+version_added: "2.1"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - name of the cluster.
+ required: true
+ zone:
+ description:
+ - Name of the zone in which the cluster belongs to.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ pod:
+ description:
+ - Name of the pod in which the cluster belongs to.
+ required: false
+ default: null
+ cluster_type:
+ description:
+ - Type of the cluster.
+ - Required if C(state=present)
+ required: false
+ default: null
+ choices: [ 'CloudManaged', 'ExternalManaged' ]
+ hypervisor:
+ description:
+ - Name the hypervisor to be used.
+ - Required if C(state=present).
+ required: false
+ default: none
+ choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ]
+ url:
+ description:
+ - URL for the cluster
+ required: false
+ default: null
+ username:
+ description:
+ - Username for the cluster.
+ required: false
+ default: null
+ password:
+ description:
+ - Password for the cluster.
+ required: false
+ default: null
+ guest_vswitch_name:
+ description:
+ - Name of virtual switch used for guest traffic in the cluster.
+ - This would override zone wide traffic label setting.
+ required: false
+ default: null
+ guest_vswitch_type:
+ description:
+ - Type of virtual switch used for guest traffic in the cluster.
+ - Allowed values are, vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch)
+ required: false
+ default: null
+ choices: [ 'vmwaresvs', 'vmwaredvs' ]
+ public_vswitch_name:
+ description:
+ - Name of virtual switch used for public traffic in the cluster.
+ - This would override zone wide traffic label setting.
+ required: false
+ default: null
+ public_vswitch_type:
+ description:
+ - Type of virtual switch used for public traffic in the cluster.
+ - Allowed values are, vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch)
+ required: false
+ default: null
+ choices: [ 'vmwaresvs', 'vmwaredvs' ]
+ vms_ip_address:
+ description:
+ - IP address of the VSM associated with this cluster.
+ required: false
+ default: null
+ vms_username:
+ description:
+ - Username for the VSM associated with this cluster.
+ required: false
+ default: null
+ vms_password:
+ description:
+ - Password for the VSM associated with this cluster.
+ required: false
+ default: null
+ ovm3_cluster:
+ description:
+ - Ovm3 native OCFS2 clustering enabled for cluster.
+ required: false
+ default: null
+ ovm3_pool:
+ description:
+ - Ovm3 native pooling enabled for cluster.
+ required: false
+ default: null
+ ovm3_vip:
+ description:
+ - Ovm3 vip to use for pool (and cluster).
+ required: false
+ default: null
+ state:
+ description:
+ - State of the cluster.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent', 'disabled', 'enabled' ]
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Ensure a cluster is present
+- local_action:
+ module: cs_cluster
+ name: kvm-cluster-01
+ zone: ch-zrh-ix-01
+ hypervisor: KVM
+ cluster_type: CloudManaged
+
+# Ensure a cluster is disabled
+- local_action:
+ module: cs_cluster
+ name: kvm-cluster-01
+ zone: ch-zrh-ix-01
+ state: disabled
+
+# Ensure a cluster is enabled
+- local_action:
+ module: cs_cluster
+ name: kvm-cluster-01
+ zone: ch-zrh-ix-01
+ state: enabled
+
+# Ensure a cluster is absent
+- local_action:
+ module: cs_cluster
+ name: kvm-cluster-01
+ zone: ch-zrh-ix-01
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the cluster.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the cluster.
+ returned: success
+ type: string
+ sample: cluster01
+allocation_state:
+ description: State of the cluster.
+ returned: success
+ type: string
+ sample: Enabled
+cluster_type:
+ description: Type of the cluster.
+ returned: success
+ type: string
+ sample: ExternalManaged
+cpu_overcommit_ratio:
+ description: The CPU overcommit ratio of the cluster.
+ returned: success
+ type: string
+ sample: 1.0
+memory_overcommit_ratio:
+ description: The memory overcommit ratio of the cluster.
+ returned: success
+ type: string
+ sample: 1.0
+managed_state:
+ description: Whether this cluster is managed by CloudStack.
+ returned: success
+ type: string
+ sample: Managed
+ovm3_vip:
+ description: Ovm3 VIP to use for pooling and/or clustering
+ returned: success
+ type: string
+ sample: 10.10.10.101
+hypervisor:
+ description: Hypervisor of the cluster
+ returned: success
+ type: string
+ sample: VMware
+zone:
+ description: Name of zone the cluster is in.
+ returned: success
+ type: string
+ sample: ch-gva-2
+pod:
+ description: Name of pod the cluster is in.
+ returned: success
+ type: string
+ sample: pod01
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
class AnsibleCloudStackCluster(AnsibleCloudStack):
    """Implements the cs_cluster module: create, update, enable/disable and
    remove host clusters.

    Inherits self.cs (API client), self.module, self.result and helpers such
    as get_zone()/has_changed()/_get_by_key() from AnsibleCloudStack
    (ansible.module_utils.cloudstack).
    """

    def __init__(self, module):
        super(AnsibleCloudStackCluster, self).__init__(module)
        # Map CloudStack API result keys to the keys this module returns.
        self.returns = {
            'allocationstate': 'allocation_state',
            'hypervisortype': 'hypervisor',
            'clustertype': 'cluster_type',
            'podname': 'pod',
            'managedstate': 'managed_state',
            'memoryovercommitratio': 'memory_overcommit_ratio',
            'cpuovercommitratio': 'cpu_overcommit_ratio',
            'ovm3vip': 'ovm3_vip',
        }
        # Cached cluster dict; populated lazily by get_cluster().
        self.cluster = None

    def _get_common_cluster_args(self):
        """Return the arguments shared by addCluster and updateCluster."""
        args = {
            'clustername': self.module.params.get('name'),
            'hypervisor': self.module.params.get('hypervisor'),
            'clustertype': self.module.params.get('cluster_type'),
        }
        state = self.module.params.get('state')
        if state in ['enabled', 'disabled']:
            # The API expects 'Enabled'/'Disabled'.
            args['allocationstate'] = state.capitalize()
        return args

    def get_pod(self, key=None):
        """Look up the pod named by the 'pod' parameter in the current zone.
        Fails the module when the pod cannot be found."""
        args = {
            'name': self.module.params.get('pod'),
            'zoneid': self.get_zone(key='id'),
        }
        pods = self.cs.listPods(**args)
        if pods:
            return self._get_by_key(key, pods['pod'][0])
        self.module.fail_json(msg="Pod %s not found in zone %s." % (self.module.params.get('pod'), self.get_zone(key='name')))

    def get_cluster(self):
        """Return the cluster dict (looked up by id when given, otherwise
        by name), or None when it does not exist.  Caches the result."""
        if not self.cluster:
            args = {}

            uuid = self.module.params.get('id')
            if uuid:
                args['id'] = uuid
                clusters = self.cs.listClusters(**args)
                if clusters:
                    self.cluster = clusters['cluster'][0]
                    return self.cluster

            args['name'] = self.module.params.get('name')
            clusters = self.cs.listClusters(**args)
            if clusters:
                self.cluster = clusters['cluster'][0]
                # fix different return keys from the API than the request
                # arguments given, so has_changed() compares like with like
                self.cluster['hypervisor'] = self.cluster['hypervisortype']
                self.cluster['clustername'] = self.cluster['name']
        return self.cluster

    def present_cluster(self):
        """Ensure the cluster exists and matches the requested arguments."""
        cluster = self.get_cluster()
        if cluster:
            cluster = self._update_cluster()
        else:
            cluster = self._create_cluster()
        return cluster

    def _create_cluster(self):
        """Create the cluster; 'cluster_type' and 'hypervisor' are
        mandatory for creation."""
        required_params = [
            'cluster_type',
            'hypervisor',
        ]
        self.module.fail_on_missing_params(required_params=required_params)

        args = self._get_common_cluster_args()
        args['zoneid'] = self.get_zone(key='id')
        args['podid'] = self.get_pod(key='id')
        args['url'] = self.module.params.get('url')
        args['username'] = self.module.params.get('username')
        args['password'] = self.module.params.get('password')
        args['guestvswitchname'] = self.module.params.get('guest_vswitch_name')
        args['guestvswitchtype'] = self.module.params.get('guest_vswitch_type')
        # BUGFIX: public_vswitch_name was previously assigned to the
        # 'publicvswitchtype' key and immediately overwritten by the next
        # line, so the name never reached the API.
        args['publicvswitchname'] = self.module.params.get('public_vswitch_name')
        args['publicvswitchtype'] = self.module.params.get('public_vswitch_type')
        args['vsmipaddress'] = self.module.params.get('vms_ip_address')
        args['vsmusername'] = self.module.params.get('vms_username')
        # BUGFIX: the addCluster API parameter is 'vsmpassword'
        # (it was misspelled 'vmspassword' before).
        args['vsmpassword'] = self.module.params.get('vms_password')
        args['ovm3cluster'] = self.module.params.get('ovm3_cluster')
        args['ovm3pool'] = self.module.params.get('ovm3_pool')
        args['ovm3vip'] = self.module.params.get('ovm3_vip')

        self.result['changed'] = True

        cluster = None
        if not self.module.check_mode:
            res = self.cs.addCluster(**args)
            if 'errortext' in res:
                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
            # API returns a list as result CLOUDSTACK-9205
            if isinstance(res['cluster'], list):
                cluster = res['cluster'][0]
            else:
                cluster = res['cluster']
        return cluster

    def _update_cluster(self):
        """Update the existing cluster when any common argument changed."""
        cluster = self.get_cluster()

        args = self._get_common_cluster_args()
        args['id'] = cluster['id']

        if self.has_changed(args, cluster):
            self.result['changed'] = True

            if not self.module.check_mode:
                res = self.cs.updateCluster(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                cluster = res['cluster']
        return cluster

    def absent_cluster(self):
        """Ensure the cluster is absent.  Returns the previous cluster
        dict, or None when it was already absent."""
        cluster = self.get_cluster()
        if cluster:
            self.result['changed'] = True

            args = {
                'id': cluster['id'],
            }
            if not self.module.check_mode:
                res = self.cs.deleteCluster(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
        return cluster
+
+
def main():
    """
    Module entry point for cs_cluster: ensure the cluster is present,
    enabled/disabled or absent per the 'state' parameter, then exit with
    the resulting facts.
    :return: None
    """
    # cs_argument_spec() supplies the common CloudStack connection options.
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        zone=dict(default=None),
        pod=dict(default=None),
        cluster_type=dict(choices=['CloudManaged', 'ExternalManaged'], default=None),
        # CS_HYPERVISORS is the shared hypervisor list from
        # ansible.module_utils.cloudstack.
        hypervisor=dict(choices=CS_HYPERVISORS, default=None),
        state=dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'),
        url=dict(default=None),
        username=dict(default=None),
        password=dict(default=None, no_log=True),
        guest_vswitch_name=dict(default=None),
        guest_vswitch_type=dict(choices=['vmwaresvs', 'vmwaredvs'], default=None),
        public_vswitch_name=dict(default=None),
        public_vswitch_type=dict(choices=['vmwaresvs', 'vmwaredvs'], default=None),
        vms_ip_address=dict(default=None),
        vms_username=dict(default=None),
        vms_password=dict(default=None, no_log=True),
        ovm3_cluster=dict(default=None),
        ovm3_pool=dict(default=None),
        ovm3_vip=dict(default=None),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    try:
        acs_cluster = AnsibleCloudStackCluster(module)

        # 'enabled'/'disabled' are handled inside present_cluster() via the
        # allocationstate argument; only 'absent' removes the cluster.
        state = module.params.get('state')
        if state in ['absent']:
            cluster = acs_cluster.absent_cluster()
        else:
            cluster = acs_cluster.present_cluster()

        result = acs_cluster.get_result(cluster)

    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_configuration.py b/lib/ansible/modules/cloud/cloudstack/cs_configuration.py
new file mode 100644
index 0000000000..696593550a
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_configuration.py
@@ -0,0 +1,292 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_configuration
+short_description: Manages configuration on Apache CloudStack based clouds.
+description:
+ - Manages global, zone, account, storage and cluster configurations.
+version_added: "2.1"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the configuration.
+ required: true
+ value:
+ description:
+ - Value of the configuration.
+ required: true
+ account:
+ description:
+ - Ensure the value for corresponding account.
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain the account is related to.
+ - Only considered if C(account) is used.
+ required: false
+ default: ROOT
+ zone:
+ description:
+ - Ensure the value for corresponding zone.
+ required: false
+ default: null
+ storage:
+ description:
+ - Ensure the value for corresponding storage pool.
+ required: false
+ default: null
+ cluster:
+ description:
+ - Ensure the value for corresponding cluster.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Ensure global configuration
+- local_action:
+ module: cs_configuration
+ name: router.reboot.when.outofband.migrated
+ value: false
+
+# Ensure zone configuration
+- local_action:
+ module: cs_configuration
+ name: router.reboot.when.outofband.migrated
+ zone: ch-gva-01
+ value: true
+
+# Ensure storage configuration
+- local_action:
+ module: cs_configuration
+ name: storage.overprovisioning.factor
+ storage: storage01
+ value: 2.0
+
+# Ensure account configuration
+- local_action:
+    module: cs_configuration
+ name: allow.public.user.templates
+ value: false
+ account: acme inc
+ domain: customers
+'''
+
+RETURN = '''
+---
+category:
+ description: Category of the configuration.
+ returned: success
+ type: string
+ sample: Advanced
+scope:
+ description: Scope (zone/cluster/storagepool/account) of the parameter that needs to be updated.
+ returned: success
+ type: string
+ sample: storagepool
+description:
+ description: Description of the configuration.
+ returned: success
+ type: string
+ sample: Setup the host to do multipath
+name:
+ description: Name of the configuration.
+ returned: success
+ type: string
+ sample: zone.vlan.capacity.notificationthreshold
+value:
+ description: Value of the configuration.
+ returned: success
+ type: string
+ sample: "0.75"
+account:
+ description: Account of the configuration.
+ returned: success
+ type: string
+ sample: admin
+domain:
+ description: Domain of account of the configuration.
+ returned: success
+ type: string
+ sample: ROOT
+zone:
+ description: Zone of the configuration.
+ returned: success
+ type: string
+ sample: ch-gva-01
+cluster:
+ description: Cluster of the configuration.
+ returned: success
+ type: string
+ sample: cluster01
+storage:
+ description: Storage of the configuration.
+ returned: success
+ type: string
+ sample: storage01
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackConfiguration(AnsibleCloudStack):
+    """Manage CloudStack configuration values at global, zone, cluster,
+    storage pool or account scope."""
+
+    def __init__(self, module):
+        super(AnsibleCloudStackConfiguration, self).__init__(module)
+        # Map CloudStack API result keys to this module's return keys.
+        self.returns = {
+            'category': 'category',
+            'scope': 'scope',
+            'value': 'value',
+        }
+        # Lazily resolved entity caches.
+        self.storage = None
+        self.account = None
+        self.cluster = None
+
+
+    def _get_common_configuration_args(self):
+        """Return the args shared by listConfigurations and updateConfiguration."""
+        args = {}
+        args['name'] = self.module.params.get('name')
+        args['accountid'] = self.get_account(key='id')
+        args['storageid'] = self.get_storage(key='id')
+        args['zoneid'] = self.get_zone(key='id')
+        args['clusterid'] = self.get_cluster(key='id')
+        return args
+
+
+    def get_zone(self, key=None):
+        # make sure we do not use the default zone
+        zone = self.module.params.get('zone')
+        if zone:
+            return super(AnsibleCloudStackConfiguration, self).get_zone(key=key)
+        # implicitly returns None when no zone param was given
+
+
+    def get_cluster(self, key=None):
+        """Resolve the 'cluster' param to a cluster dict; fail if unknown."""
+        if not self.cluster:
+            cluster_name = self.module.params.get('cluster')
+            if not cluster_name:
+                return None
+            args = {}
+            args['name'] = cluster_name
+            clusters = self.cs.listClusters(**args)
+            if clusters:
+                self.cluster = clusters['cluster'][0]
+                self.result['cluster'] = self.cluster['name']
+            else:
+                self.module.fail_json(msg="Cluster %s not found." % cluster_name)
+        return self._get_by_key(key=key, my_dict=self.cluster)
+
+
+    def get_storage(self, key=None):
+        """Resolve the 'storage' param to a storage pool dict; fail if unknown."""
+        if not self.storage:
+            storage_pool_name = self.module.params.get('storage')
+            if not storage_pool_name:
+                return None
+            args = {}
+            args['name'] = storage_pool_name
+            storage_pools = self.cs.listStoragePools(**args)
+            if storage_pools:
+                self.storage = storage_pools['storagepool'][0]
+                self.result['storage'] = self.storage['name']
+            else:
+                self.module.fail_json(msg="Storage pool %s not found." % storage_pool_name)
+        return self._get_by_key(key=key, my_dict=self.storage)
+
+
+    def get_configuration(self):
+        """Return the existing configuration entry; fail if the name is unknown."""
+        configuration = None
+        args = self._get_common_configuration_args()
+        configurations = self.cs.listConfigurations(**args)
+        if not configurations:
+            self.module.fail_json(msg="Configuration %s not found." % args['name'])
+        configuration = configurations['configuration'][0]
+        return configuration
+
+
+    def get_value(self):
+        """Return the desired value as a string.
+
+        Booleans are normalized to 'true'/'false' to match how CloudStack
+        represents them.
+        """
+        value = str(self.module.params.get('value'))
+        if value in ('True', 'False'):
+            value = value.lower()
+        return value
+
+
+    def present_configuration(self):
+        """Update the configuration value if it differs; honors check mode."""
+        configuration = self.get_configuration()
+        args = self._get_common_configuration_args()
+        args['value'] = self.get_value()
+        # Only the 'value' key is considered for change detection.
+        if self.has_changed(args, configuration, ['value']):
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                res = self.cs.updateConfiguration(**args)
+                if 'errortext' in res:
+                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+                configuration = res['configuration']
+        return configuration
+
+
+    def get_result(self, configuration):
+        """Build the module result, adding the scope the value was set in."""
+        self.result = super(AnsibleCloudStackConfiguration, self).get_result(configuration)
+        if self.account:
+            self.result['account'] = self.account['name']
+            self.result['domain'] = self.domain['path']
+        elif self.zone:
+            self.result['zone'] = self.zone['name']
+        return self.result
+
+def main():
+    """Entry point: ensure a CloudStack configuration has the desired value.
+
+    Configurations only ever exist; there is no 'absent' state — the value
+    is updated in place.
+    """
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        name = dict(required=True),
+        value = dict(type='str', required=True),
+        zone = dict(default=None),
+        storage = dict(default=None),
+        cluster = dict(default=None),
+        account = dict(default=None),
+        domain = dict(default='ROOT')
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=cs_required_together(),
+        supports_check_mode=True
+    )
+
+    try:
+        acs_configuration = AnsibleCloudStackConfiguration(module)
+        configuration = acs_configuration.present_configuration()
+        result = acs_configuration.get_result(configuration)
+
+    except CloudStackException as e:
+        module.fail_json(msg='CloudStackException: %s' % str(e))
+
+    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_domain.py b/lib/ansible/modules/cloud/cloudstack/cs_domain.py
new file mode 100644
index 0000000000..35e32aa066
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_domain.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_domain
+short_description: Manages domains on Apache CloudStack based clouds.
+description:
+ - Create, update and remove domains.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ path:
+ description:
+ - Path of the domain.
+ - Prefix C(ROOT/) or C(/ROOT/) in path is optional.
+ required: true
+ network_domain:
+ description:
+ - Network domain for networks in the domain.
+ required: false
+ default: null
+ clean_up:
+ description:
+ - Clean up all domain resources like child domains and accounts.
+ - Considered on C(state=absent).
+ required: false
+ default: false
+ state:
+ description:
+ - State of the domain.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create a domain
+local_action:
+ module: cs_domain
+ path: ROOT/customers
+ network_domain: customers.example.com
+
+# Create another subdomain
+local_action:
+ module: cs_domain
+ path: ROOT/customers/xy
+ network_domain: xy.customers.example.com
+
+# Remove a domain
+local_action:
+ module: cs_domain
+ path: ROOT/customers/xy
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the domain.
+ returned: success
+ type: string
+ sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
+name:
+ description: Name of the domain.
+ returned: success
+ type: string
+ sample: customers
+path:
+ description: Domain path.
+ returned: success
+ type: string
+ sample: /ROOT/customers
+parent_domain:
+ description: Parent domain of the domain.
+ returned: success
+ type: string
+ sample: ROOT
+network_domain:
+ description: Network domain of the domain.
+ returned: success
+ type: string
+ sample: example.local
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackDomain(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackDomain, self).__init__(module)
+ self.returns = {
+ 'path': 'path',
+ 'networkdomain': 'network_domain',
+ 'parentdomainname': 'parent_domain',
+ }
+ self.domain = None
+
+
+ def _get_domain_internal(self, path=None):
+ if not path:
+ path = self.module.params.get('path')
+
+ if path.endswith('/'):
+ self.module.fail_json(msg="Path '%s' must not end with /" % path)
+
+ path = path.lower()
+
+ if path.startswith('/') and not path.startswith('/root/'):
+ path = "root" + path
+ elif not path.startswith('root/'):
+ path = "root/" + path
+
+ args = {}
+ args['listall'] = True
+
+ domains = self.cs.listDomains(**args)
+ if domains:
+ for d in domains['domain']:
+ if path == d['path'].lower():
+ return d
+ return None
+
+
+ def get_name(self):
+ # last part of the path is the name
+ name = self.module.params.get('path').split('/')[-1:]
+ return name
+
+
+ def get_domain(self, key=None):
+ if not self.domain:
+ self.domain = self._get_domain_internal()
+ return self._get_by_key(key, self.domain)
+
+
+ def get_parent_domain(self, key=None):
+ path = self.module.params.get('path')
+ # cut off last /*
+ path = '/'.join(path.split('/')[:-1])
+ if not path:
+ return None
+ parent_domain = self._get_domain_internal(path=path)
+ if not parent_domain:
+ self.module.fail_json(msg="Parent domain path %s does not exist" % path)
+ return self._get_by_key(key, parent_domain)
+
+
+ def present_domain(self):
+ domain = self.get_domain()
+ if not domain:
+ domain = self.create_domain(domain)
+ else:
+ domain = self.update_domain(domain)
+ return domain
+
+
+ def create_domain(self, domain):
+ self.result['changed'] = True
+
+ args = {}
+ args['name'] = self.get_name()
+ args['parentdomainid'] = self.get_parent_domain(key='id')
+ args['networkdomain'] = self.module.params.get('network_domain')
+
+ if not self.module.check_mode:
+ res = self.cs.createDomain(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ domain = res['domain']
+ return domain
+
+
+ def update_domain(self, domain):
+ args = {}
+ args['id'] = domain['id']
+ args['networkdomain'] = self.module.params.get('network_domain')
+
+ if self.has_changed(args, domain):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.updateDomain(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ domain = res['domain']
+ return domain
+
+
+ def absent_domain(self):
+ domain = self.get_domain()
+ if domain:
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ args = {}
+ args['id'] = domain['id']
+ args['cleanup'] = self.module.params.get('clean_up')
+ res = self.cs.deleteDomain(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ res = self.poll_job(res, 'domain')
+ return domain
+
+
+
+def main():
+    """Entry point: ensure the desired state of a CloudStack domain."""
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        path = dict(required=True),
+        state = dict(choices=['present', 'absent'], default='present'),
+        network_domain = dict(default=None),
+        clean_up = dict(type='bool', default=False),
+        poll_async = dict(type='bool', default=True),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=cs_required_together(),
+        supports_check_mode=True
+    )
+
+    try:
+        acs_dom = AnsibleCloudStackDomain(module)
+
+        state = module.params.get('state')
+        if state in ['absent']:
+            domain = acs_dom.absent_domain()
+        else:
+            domain = acs_dom.present_domain()
+
+        result = acs_dom.get_result(domain)
+
+    except CloudStackException as e:
+        module.fail_json(msg='CloudStackException: %s' % str(e))
+
+    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_facts.py b/lib/ansible/modules/cloud/cloudstack/cs_facts.py
new file mode 100644
index 0000000000..6f51127df6
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_facts.py
@@ -0,0 +1,226 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_facts
+short_description: Gather facts on instances of Apache CloudStack based clouds.
+description:
+ - This module fetches data from the metadata API in CloudStack. The module must be called from within the instance itself.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ filter:
+ description:
+ - Filter for a specific fact.
+ required: false
+ default: null
+ choices:
+ - cloudstack_service_offering
+ - cloudstack_availability_zone
+ - cloudstack_public_hostname
+ - cloudstack_public_ipv4
+ - cloudstack_local_hostname
+ - cloudstack_local_ipv4
+ - cloudstack_instance_id
+ - cloudstack_user_data
+requirements: [ 'yaml' ]
+'''
+
+EXAMPLES = '''
+# Gather all facts on instances
+- name: Gather cloudstack facts
+ cs_facts:
+
+# Gather specific fact on instances
+- name: Gather cloudstack facts
+ cs_facts: filter=cloudstack_instance_id
+'''
+
+RETURN = '''
+---
+cloudstack_availability_zone:
+ description: zone the instance is deployed in.
+ returned: success
+ type: string
+ sample: ch-gva-2
+cloudstack_instance_id:
+ description: UUID of the instance.
+ returned: success
+ type: string
+ sample: ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
+cloudstack_local_hostname:
+ description: local hostname of the instance.
+ returned: success
+ type: string
+ sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
+cloudstack_local_ipv4:
+ description: local IPv4 of the instance.
+ returned: success
+ type: string
+ sample: 185.19.28.35
+cloudstack_public_hostname:
+ description: public IPv4 of the router. Same as C(cloudstack_public_ipv4).
+ returned: success
+ type: string
+ sample: VM-ab4e80b0-3e7e-4936-bdc5-e334ba5b0139
+cloudstack_public_ipv4:
+ description: public IPv4 of the router.
+ returned: success
+ type: string
+ sample: 185.19.28.35
+cloudstack_service_offering:
+ description: service offering of the instance.
+ returned: success
+ type: string
+ sample: Micro 512mb 1cpu
+cloudstack_user_data:
+ description: data of the instance provided by users.
+ returned: success
+ type: dict
+ sample: { "bla": "foo" }
+'''
+
+import os
+
+try:
+ import yaml
+ has_lib_yaml = True
+except ImportError:
+ has_lib_yaml = False
+
+CS_METADATA_BASE_URL = "http://%s/latest/meta-data"
+CS_USERDATA_BASE_URL = "http://%s/latest/user-data"
+
+class CloudStackFacts(object):
+    """Gather facts from the CloudStack metadata service.
+
+    The service is reached via the DHCP server address of the default
+    interface, read from the local dhclient leases file.
+    """
+
+    def __init__(self):
+        # NOTE: relies on the module-level global 'module' created in main().
+        self.facts = ansible_facts(module)
+        self.api_ip = None
+        # Fact name -> path below the metadata base URL.
+        self.fact_paths = {
+            'cloudstack_service_offering': 'service-offering',
+            'cloudstack_availability_zone': 'availability-zone',
+            'cloudstack_public_hostname': 'public-hostname',
+            'cloudstack_public_ipv4': 'public-ipv4',
+            'cloudstack_local_hostname': 'local-hostname',
+            'cloudstack_local_ipv4': 'local-ipv4',
+            'cloudstack_instance_id': 'instance-id'
+        }
+
+    def run(self):
+        """Return all facts, or only the one selected by the 'filter' param."""
+        result = {}
+        # NOTE: 'filter' shadows the builtin of the same name.
+        filter = module.params.get('filter')
+        if not filter:
+            # NOTE: iteritems() is Python-2-only.
+            for key,path in self.fact_paths.iteritems():
+                result[key] = self._fetch(CS_METADATA_BASE_URL + "/" + path)
+            result['cloudstack_user_data'] = self._get_user_data_json()
+        else:
+            if filter == 'cloudstack_user_data':
+                result['cloudstack_user_data'] = self._get_user_data_json()
+            elif filter in self.fact_paths:
+                result[filter] = self._fetch(CS_METADATA_BASE_URL + "/" + self.fact_paths[filter])
+        return result
+
+
+    def _get_user_data_json(self):
+        """Best-effort parse of the user data; returns None on any failure."""
+        try:
+            # This data comes from users; we try what we can to parse it.
+            # NOTE(review): yaml.load on untrusted input can construct
+            # arbitrary objects; yaml.safe_load would be safer — confirm.
+            return yaml.load(self._fetch(CS_USERDATA_BASE_URL))
+        except:
+            # Deliberately swallow all parse errors: user data is free-form.
+            return None
+
+
+    def _fetch(self, path):
+        """GET the metadata URL template 'path'; return the body or None."""
+        api_ip = self._get_api_ip()
+        if not api_ip:
+            return None
+        api_url = path % api_ip
+        (response, info) = fetch_url(module, api_url, force=True)
+        if response:
+            data = response.read()
+        else:
+            data = None
+        return data
+
+
+    def _get_dhcp_lease_file(self):
+        """Return the path of the lease file."""
+        default_iface = self.facts['default_ipv4']['interface']
+        dhcp_lease_file_locations = [
+            '/var/lib/dhcp/dhclient.%s.leases' % default_iface, # debian / ubuntu
+            '/var/lib/dhclient/dhclient-%s.leases' % default_iface, # centos 6
+            '/var/lib/dhclient/dhclient--%s.lease' % default_iface, # centos 7
+            '/var/db/dhclient.leases.%s' % default_iface, # openbsd
+        ]
+        for file_path in dhcp_lease_file_locations:
+            if os.path.exists(file_path):
+                return file_path
+        module.fail_json(msg="Could not find dhclient leases file.")
+
+
+    def _get_api_ip(self):
+        """Return the IP of the DHCP server."""
+        if not self.api_ip:
+            dhcp_lease_file = self._get_dhcp_lease_file()
+            for line in open(dhcp_lease_file):
+                if 'dhcp-server-identifier' in line:
+                    # get IP of string "option dhcp-server-identifier 185.19.28.176;"
+                    # NOTE: str.translate(None, ';') is Python-2-only.
+                    line = line.translate(None, ';')
+                    self.api_ip = line.split()[2]
+                    break
+            if not self.api_ip:
+                module.fail_json(msg="No dhcp-server-identifier found in leases file.")
+        return self.api_ip
+
+
+def main():
+    """Entry point: gather CloudStack metadata facts and exit."""
+    # 'module' is intentionally a module-level global; CloudStackFacts and
+    # its helpers access it directly.
+    global module
+    module = AnsibleModule(
+        argument_spec = dict(
+            filter = dict(default=None, choices=[
+                'cloudstack_service_offering',
+                'cloudstack_availability_zone',
+                'cloudstack_public_hostname',
+                'cloudstack_public_ipv4',
+                'cloudstack_local_hostname',
+                'cloudstack_local_ipv4',
+                'cloudstack_instance_id',
+                'cloudstack_user_data',
+            ]),
+        ),
+        supports_check_mode=False
+    )
+
+    # PyYAML is needed to parse user data; fail early if it is missing.
+    if not has_lib_yaml:
+        module.fail_json(msg="missing python library: yaml")
+
+    cs_facts = CloudStackFacts().run()
+    cs_facts_result = dict(changed=False, ansible_facts=cs_facts)
+    module.exit_json(**cs_facts_result)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+from ansible.module_utils.facts import *
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_firewall.py b/lib/ansible/modules/cloud/cloudstack/cs_firewall.py
new file mode 100644
index 0000000000..160e58d472
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_firewall.py
@@ -0,0 +1,433 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_firewall
+short_description: Manages firewall rules on Apache CloudStack based clouds.
+description:
+ - Creates and removes firewall rules.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ ip_address:
+ description:
+ - Public IP address the ingress rule is assigned to.
+ - Required if C(type=ingress).
+ required: false
+ default: null
+ network:
+ description:
+ - Network the egress rule is related to.
+ - Required if C(type=egress).
+ required: false
+ default: null
+ state:
+ description:
+ - State of the firewall rule.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ type:
+ description:
+ - Type of the firewall rule.
+ required: false
+ default: 'ingress'
+ choices: [ 'ingress', 'egress' ]
+ protocol:
+ description:
+ - Protocol of the firewall rule.
+ - C(all) is only available if C(type=egress)
+ required: false
+ default: 'tcp'
+ choices: [ 'tcp', 'udp', 'icmp', 'all' ]
+ cidr:
+ description:
+ - CIDR (full notation) to be used for firewall rule.
+ required: false
+ default: '0.0.0.0/0'
+ start_port:
+ description:
+ - Start port for this rule. Considered if C(protocol=tcp) or C(protocol=udp).
+ required: false
+ default: null
+ aliases: [ 'port' ]
+ end_port:
+ description:
+ - End port for this rule. Considered if C(protocol=tcp) or C(protocol=udp). If not specified, equal C(start_port).
+ required: false
+ default: null
+ icmp_type:
+ description:
+ - Type of the icmp message being sent. Considered if C(protocol=icmp).
+ required: false
+ default: null
+ icmp_code:
+ description:
+ - Error code for this icmp message. Considered if C(protocol=icmp).
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain the firewall rule is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the firewall rule is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the firewall rule is related to.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone in which the virtual machine is in.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Allow inbound port 80/tcp from 1.2.3.4 to 4.3.2.1
+- local_action:
+ module: cs_firewall
+ ip_address: 4.3.2.1
+ port: 80
+ cidr: 1.2.3.4/32
+
+# Allow inbound tcp/udp port 53 to 4.3.2.1
+- local_action:
+ module: cs_firewall
+ ip_address: 4.3.2.1
+ port: 53
+ protocol: '{{ item }}'
+ with_items:
+ - tcp
+ - udp
+
+# Ensure firewall rule is removed
+- local_action:
+ module: cs_firewall
+ ip_address: 4.3.2.1
+ start_port: 8000
+ end_port: 8888
+ cidr: 17.0.0.0/8
+ state: absent
+
+# Allow all outbound traffic
+- local_action:
+ module: cs_firewall
+ network: my_network
+ type: egress
+ protocol: all
+
+# Allow only HTTP outbound traffic for an IP
+- local_action:
+ module: cs_firewall
+ network: my_network
+ type: egress
+ port: 80
+ cidr: 10.101.1.20
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the rule.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+ip_address:
+ description: IP address of the rule if C(type=ingress)
+ returned: success
+ type: string
+ sample: 10.100.212.10
+type:
+ description: Type of the rule.
+ returned: success
+ type: string
+ sample: ingress
+cidr:
+ description: CIDR of the rule.
+ returned: success
+ type: string
+ sample: 0.0.0.0/0
+protocol:
+ description: Protocol of the rule.
+ returned: success
+ type: string
+ sample: tcp
+start_port:
+ description: Start port of the rule.
+ returned: success
+ type: int
+ sample: 80
+end_port:
+ description: End port of the rule.
+ returned: success
+ type: int
+ sample: 80
+icmp_code:
+ description: ICMP code of the rule.
+ returned: success
+ type: int
+ sample: 1
+icmp_type:
+ description: ICMP type of the rule.
+ returned: success
+ type: int
+ sample: 1
+network:
+ description: Name of the network if C(type=egress)
+ returned: success
+ type: string
+ sample: my_network
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackFirewall(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackFirewall, self).__init__(module)
+ self.returns = {
+ 'cidrlist': 'cidr',
+ 'startport': 'start_port',
+ 'endpoint': 'end_port',
+ 'protocol': 'protocol',
+ 'ipaddress': 'ip_address',
+ 'icmpcode': 'icmp_code',
+ 'icmptype': 'icmp_type',
+ }
+ self.firewall_rule = None
+ self.network = None
+
+
+ def get_firewall_rule(self):
+ if not self.firewall_rule:
+ cidr = self.module.params.get('cidr')
+ protocol = self.module.params.get('protocol')
+ start_port = self.module.params.get('start_port')
+ end_port = self.get_or_fallback('end_port', 'start_port')
+ icmp_code = self.module.params.get('icmp_code')
+ icmp_type = self.module.params.get('icmp_type')
+ fw_type = self.module.params.get('type')
+
+ if protocol in ['tcp', 'udp'] and not (start_port and end_port):
+ self.module.fail_json(msg="missing required argument for protocol '%s': start_port or end_port" % protocol)
+
+ if protocol == 'icmp' and not icmp_type:
+ self.module.fail_json(msg="missing required argument for protocol 'icmp': icmp_type")
+
+ if protocol == 'all' and fw_type != 'egress':
+ self.module.fail_json(msg="protocol 'all' could only be used for type 'egress'" )
+
+ args = {}
+ args['account'] = self.get_account('name')
+ args['domainid'] = self.get_domain('id')
+ args['projectid'] = self.get_project('id')
+
+ if fw_type == 'egress':
+ args['networkid'] = self.get_network(key='id')
+ if not args['networkid']:
+ self.module.fail_json(msg="missing required argument for type egress: network")
+ firewall_rules = self.cs.listEgressFirewallRules(**args)
+ else:
+ args['ipaddressid'] = self.get_ip_address('id')
+ if not args['ipaddressid']:
+ self.module.fail_json(msg="missing required argument for type ingress: ip_address")
+ firewall_rules = self.cs.listFirewallRules(**args)
+
+ if firewall_rules and 'firewallrule' in firewall_rules:
+ for rule in firewall_rules['firewallrule']:
+ type_match = self._type_cidr_match(rule, cidr)
+
+ protocol_match = self._tcp_udp_match(rule, protocol, start_port, end_port) \
+ or self._icmp_match(rule, protocol, icmp_code, icmp_type) \
+ or self._egress_all_match(rule, protocol, fw_type)
+
+ if type_match and protocol_match:
+ self.firewall_rule = rule
+ break
+ return self.firewall_rule
+
+
+ def _tcp_udp_match(self, rule, protocol, start_port, end_port):
+ return protocol in ['tcp', 'udp'] \
+ and protocol == rule['protocol'] \
+ and start_port == int(rule['startport']) \
+ and end_port == int(rule['endport'])
+
+
+ def _egress_all_match(self, rule, protocol, fw_type):
+ return protocol in ['all'] \
+ and protocol == rule['protocol'] \
+ and fw_type == 'egress'
+
+
+ def _icmp_match(self, rule, protocol, icmp_code, icmp_type):
+ return protocol == 'icmp' \
+ and protocol == rule['protocol'] \
+ and icmp_code == rule['icmpcode'] \
+ and icmp_type == rule['icmptype']
+
+
+ def _type_cidr_match(self, rule, cidr):
+ return cidr == rule['cidrlist']
+
+
+ def create_firewall_rule(self):
+ firewall_rule = self.get_firewall_rule()
+ if not firewall_rule:
+ self.result['changed'] = True
+
+ args = {}
+ args['cidrlist'] = self.module.params.get('cidr')
+ args['protocol'] = self.module.params.get('protocol')
+ args['startport'] = self.module.params.get('start_port')
+ args['endport'] = self.get_or_fallback('end_port', 'start_port')
+ args['icmptype'] = self.module.params.get('icmp_type')
+ args['icmpcode'] = self.module.params.get('icmp_code')
+
+ fw_type = self.module.params.get('type')
+ if not self.module.check_mode:
+ if fw_type == 'egress':
+ args['networkid'] = self.get_network(key='id')
+ res = self.cs.createEgressFirewallRule(**args)
+ else:
+ args['ipaddressid'] = self.get_ip_address('id')
+ res = self.cs.createFirewallRule(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ firewall_rule = self.poll_job(res, 'firewallrule')
+ return firewall_rule
+
+
+ def remove_firewall_rule(self):
+ firewall_rule = self.get_firewall_rule()
+ if firewall_rule:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = firewall_rule['id']
+
+ fw_type = self.module.params.get('type')
+ if not self.module.check_mode:
+ if fw_type == 'egress':
+ res = self.cs.deleteEgressFirewallRule(**args)
+ else:
+ res = self.cs.deleteFirewallRule(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ res = self.poll_job(res, 'firewallrule')
+ return firewall_rule
+
+
+ def get_result(self, firewall_rule):
+ super(AnsibleCloudStackFirewall, self).get_result(firewall_rule)
+ if firewall_rule:
+ self.result['type'] = self.module.params.get('type')
+ if self.result['type'] == 'egress':
+ self.result['network'] = self.get_network(key='displaytext')
+ return self.result
+
+
+def main():
+    """Entry point: ensure the desired state of a CloudStack firewall rule."""
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        ip_address = dict(default=None),
+        network = dict(default=None),
+        cidr = dict(default='0.0.0.0/0'),
+        protocol = dict(choices=['tcp', 'udp', 'icmp', 'all'], default='tcp'),
+        type = dict(choices=['ingress', 'egress'], default='ingress'),
+        icmp_type = dict(type='int', default=None),
+        icmp_code = dict(type='int', default=None),
+        start_port = dict(type='int', aliases=['port'], default=None),
+        end_port = dict(type='int', default=None),
+        state = dict(choices=['present', 'absent'], default='present'),
+        zone = dict(default=None),
+        domain = dict(default=None),
+        account = dict(default=None),
+        project = dict(default=None),
+        poll_async = dict(type='bool', default=True),
+    ))
+
+    required_together = cs_required_together()
+    required_together.extend([
+        ['icmp_type', 'icmp_code'],
+    ])
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=required_together,
+        # ingress rules need an ip_address, egress rules a network.
+        required_one_of = (
+            ['ip_address', 'network'],
+        ),
+        mutually_exclusive = (
+            ['icmp_type', 'start_port'],
+            ['icmp_type', 'end_port'],
+            ['ip_address', 'network'],
+        ),
+        supports_check_mode=True
+    )
+
+    try:
+        acs_fw = AnsibleCloudStackFirewall(module)
+
+        state = module.params.get('state')
+        if state in ['absent']:
+            fw_rule = acs_fw.remove_firewall_rule()
+        else:
+            fw_rule = acs_fw.create_firewall_rule()
+
+        result = acs_fw.get_result(fw_rule)
+
+    except CloudStackException as e:
+        module.fail_json(msg='CloudStackException: %s' % str(e))
+
+    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_instance.py b/lib/ansible/modules/cloud/cloudstack/cs_instance.py
new file mode 100644
index 0000000000..58c9872485
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_instance.py
@@ -0,0 +1,1034 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_instance
+short_description: Manages instances and virtual machines on Apache CloudStack based clouds.
+description:
+ - Deploy, start, update, scale, restart, restore, stop and destroy instances.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Host name of the instance. C(name) can only contain ASCII letters.
+ - Name will be generated (UUID) by CloudStack if not specified and can not be changed afterwards.
+ - Either C(name) or C(display_name) is required.
+ required: false
+ default: null
+ display_name:
+ description:
+ - Custom display name of the instances.
+ - Display name will be set to C(name) if not specified.
+ - Either C(name) or C(display_name) is required.
+ required: false
+ default: null
+ group:
+ description:
+ - Group in where the new instance should be in.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the instance.
+ required: false
+ default: 'present'
+ choices: [ 'deployed', 'started', 'stopped', 'restarted', 'restored', 'destroyed', 'expunged', 'present', 'absent' ]
+ service_offering:
+ description:
+ - Name or id of the service offering of the new instance.
+ - If not set, first found service offering is used.
+ required: false
+ default: null
+ cpu:
+ description:
+ - The number of CPUs to allocate to the instance, used with custom service offerings
+ required: false
+ default: null
+ cpu_speed:
+ description:
+ - The clock speed/shares allocated to the instance, used with custom service offerings
+ required: false
+ default: null
+ memory:
+ description:
+ - The memory allocated to the instance, used with custom service offerings
+ required: false
+ default: null
+ template:
+ description:
+ - Name or id of the template to be used for creating the new instance.
+ - Required when using C(state=present).
+ - Mutually exclusive with C(ISO) option.
+ required: false
+ default: null
+ iso:
+ description:
+ - Name or id of the ISO to be used for creating the new instance.
+ - Required when using C(state=present).
+ - Mutually exclusive with C(template) option.
+ required: false
+ default: null
+ template_filter:
+ description:
+ - Name of the filter used to search for the template or iso.
+ - Used for params C(iso) or C(template) on C(state=present).
+ required: false
+ default: 'executable'
+ choices: [ 'featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community' ]
+ aliases: [ 'iso_filter' ]
+ version_added: '2.1'
+ hypervisor:
+ description:
+ - Name the hypervisor to be used for creating the new instance.
+ - Relevant when using C(state=present), but only considered if not set on ISO/template.
+ - If not set or found on ISO/template, first found hypervisor will be used.
+ required: false
+ default: null
+ choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ]
+ keyboard:
+ description:
+ - Keyboard device type for the instance.
+ required: false
+ default: null
+ choices: [ 'de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us' ]
+ networks:
+ description:
+ - List of networks to use for the new instance.
+ required: false
+ default: []
+ aliases: [ 'network' ]
+ ip_address:
+ description:
+ - IPv4 address for default instance's network during creation.
+ required: false
+ default: null
+ ip6_address:
+ description:
+ - IPv6 address for default instance's network.
+ required: false
+ default: null
+ ip_to_networks:
+ description:
+ - "List of mappings in the form {'network': NetworkName, 'ip': 1.2.3.4}"
+ - Mutually exclusive with C(networks) option.
+ required: false
+ default: null
+ aliases: [ 'ip_to_network' ]
+ disk_offering:
+ description:
+ - Name of the disk offering to be used.
+ required: false
+ default: null
+ disk_size:
+ description:
+ - Disk size in GByte required if deploying instance from ISO.
+ required: false
+ default: null
+ root_disk_size:
+ description:
+ - Root disk size in GByte required if deploying instance with KVM hypervisor and want resize the root disk size at startup (need CloudStack >= 4.4, cloud-initramfs-growroot installed and enabled in the template)
+ required: false
+ default: null
+ security_groups:
+ description:
+ - List of security groups the instance to be applied to.
+ required: false
+ default: null
+ aliases: [ 'security_group' ]
+ domain:
+ description:
+ - Domain the instance is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the instance is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the instance to be deployed in.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone in which the instance shoud be deployed.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ ssh_key:
+ description:
+ - Name of the SSH key to be deployed on the new instance.
+ required: false
+ default: null
+ affinity_groups:
+ description:
+ - Affinity groups names to be applied to the new instance.
+ required: false
+ default: []
+ aliases: [ 'affinity_group' ]
+ user_data:
+ description:
+ - Optional data (ASCII) that can be sent to the instance upon a successful deployment.
+ - The data will be automatically base64 encoded.
+ - Consider switching to HTTP_POST by using C(CLOUDSTACK_METHOD=post) to increase the HTTP_GET size limit of 2KB to 32 KB.
+ required: false
+ default: null
+ vpc:
+ description:
+ - Name of the VPC.
+ required: false
+ default: null
+ version_added: "2.3"
+ force:
+ description:
+ - Force stop/start the instance if required to apply changes, otherwise a running instance will not be changed.
+ required: false
+ default: false
+ tags:
+ description:
+ - List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
+ - "If you want to delete all tags, set a empty list e.g. C(tags: [])."
+ required: false
+ default: null
+ aliases: [ 'tag' ]
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create a instance from an ISO
+# NOTE: Names of offerings and ISOs depending on the CloudStack configuration.
+- cs_instance:
+ name: web-vm-1
+ iso: Linux Debian 7 64-bit
+ hypervisor: VMware
+ project: Integration
+ zone: ch-zrh-ix-01
+ service_offering: 1cpu_1gb
+ disk_offering: PerfPlus Storage
+ disk_size: 20
+ networks:
+ - Server Integration
+ - Sync Integration
+ - Storage Integration
+ delegate_to: localhost
+
+# For changing a running instance, use the 'force' parameter
+- cs_instance:
+ name: web-vm-1
+ display_name: web-vm-01.example.com
+ iso: Linux Debian 7 64-bit
+ service_offering: 2cpu_2gb
+ force: yes
+ delegate_to: localhost
+
+# Create or update a instance on Exoscale's public cloud using display_name.
+# Note: user_data can be used to kickstart the instance using cloud-init yaml config.
+- cs_instance:
+ display_name: web-vm-1
+ template: Linux Debian 7 64-bit
+ service_offering: Tiny
+ ssh_key: john@example.com
+ tags:
+ - key: admin
+ value: john
+ - key: foo
+ value: bar
+ user_data: |
+ #cloud-config
+ packages:
+ - nginx
+ delegate_to: localhost
+
+# Create an instance with multiple interfaces specifying the IP addresses
+- cs_instance:
+ name: web-vm-1
+ template: Linux Debian 7 64-bit
+ service_offering: Tiny
+ ip_to_networks:
+ - network: NetworkA
+ ip: 10.1.1.1
+ - network: NetworkB
+ ip: 192.0.2.1
+ delegate_to: localhost
+
+# Ensure an instance is stopped
+- cs_instance:
+ name: web-vm-1
+ state: stopped
+ delegate_to: localhost
+
+# Ensure an instance is running
+- cs_instance:
+ name: web-vm-1
+ state: started
+ delegate_to: localhost
+
+# Remove an instance
+- cs_instance:
+ name: web-vm-1
+ state: absent
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the instance.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the instance.
+ returned: success
+ type: string
+ sample: web-01
+display_name:
+ description: Display name of the instance.
+ returned: success
+ type: string
+ sample: web-01
+group:
+ description: Group name of the instance is related.
+ returned: success
+ type: string
+ sample: web
+created:
+ description: Date of the instance was created.
+ returned: success
+ type: string
+ sample: 2014-12-01T14:57:57+0100
+password_enabled:
+ description: True if password setting is enabled.
+ returned: success
+ type: boolean
+ sample: true
+password:
+ description: The password of the instance if exists.
+ returned: success
+ type: string
+ sample: Ge2oe7Do
+ssh_key:
+ description: Name of SSH key deployed to instance.
+ returned: success
+ type: string
+ sample: key@work
+domain:
+ description: Domain the instance is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the instance is related to.
+ returned: success
+ type: string
+ sample: example account
+project:
+ description: Name of project the instance is related to.
+ returned: success
+ type: string
+ sample: Production
+default_ip:
+ description: Default IP address of the instance.
+ returned: success
+ type: string
+ sample: 10.23.37.42
+public_ip:
+ description: Public IP address with instance via static NAT rule.
+ returned: success
+ type: string
+ sample: 1.2.3.4
+iso:
+ description: Name of ISO the instance was deployed with.
+ returned: success
+ type: string
+ sample: Debian-8-64bit
+template:
+ description: Name of template the instance was deployed with.
+ returned: success
+ type: string
+ sample: Debian-8-64bit
+service_offering:
+ description: Name of the service offering the instance has.
+ returned: success
+ type: string
+ sample: 2cpu_2gb
+zone:
+ description: Name of zone the instance is in.
+ returned: success
+ type: string
+ sample: ch-gva-2
+state:
+ description: State of the instance.
+ returned: success
+ type: string
+ sample: Running
+security_groups:
+ description: Security groups the instance is in.
+ returned: success
+ type: list
+ sample: '[ "default" ]'
+affinity_groups:
+ description: Affinity groups the instance is in.
+ returned: success
+ type: list
+ sample: '[ "webservers" ]'
+tags:
+ description: List of resource tags associated with the instance.
+ returned: success
+ type: dict
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+hypervisor:
+ description: Hypervisor related to this instance.
+ returned: success
+ type: string
+ sample: KVM
+instance_name:
+ description: Internal name of the instance (ROOT admin only).
+ returned: success
+ type: string
+ sample: i-44-3992-VM
+'''
+
+import base64
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackInstance(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackInstance, self).__init__(module)
+ self.returns = {
+ 'group': 'group',
+ 'hypervisor': 'hypervisor',
+ 'instancename': 'instance_name',
+ 'publicip': 'public_ip',
+ 'passwordenabled': 'password_enabled',
+ 'password': 'password',
+ 'serviceofferingname': 'service_offering',
+ 'isoname': 'iso',
+ 'templatename': 'template',
+ 'keypair': 'ssh_key',
+ }
+ self.instance = None
+ self.template = None
+ self.iso = None
+
+
+ def get_service_offering_id(self):
+ service_offering = self.module.params.get('service_offering')
+
+ service_offerings = self.cs.listServiceOfferings()
+ if service_offerings:
+ if not service_offering:
+ return service_offerings['serviceoffering'][0]['id']
+
+ for s in service_offerings['serviceoffering']:
+ if service_offering in [ s['name'], s['id'] ]:
+ return s['id']
+ self.module.fail_json(msg="Service offering '%s' not found" % service_offering)
+
+
+ def get_template_or_iso(self, key=None):
+ template = self.module.params.get('template')
+ iso = self.module.params.get('iso')
+
+ if not template and not iso:
+ return None
+
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['zoneid'] = self.get_zone(key='id')
+ args['isrecursive'] = True
+
+ if template:
+ if self.template:
+ return self._get_by_key(key, self.template)
+
+ args['templatefilter'] = self.module.params.get('template_filter')
+ templates = self.cs.listTemplates(**args)
+ if templates:
+ for t in templates['template']:
+ if template in [ t['displaytext'], t['name'], t['id'] ]:
+ self.template = t
+ return self._get_by_key(key, self.template)
+ self.module.fail_json(msg="Template '%s' not found" % template)
+
+ elif iso:
+ if self.iso:
+ return self._get_by_key(key, self.iso)
+ args['isofilter'] = self.module.params.get('template_filter')
+ isos = self.cs.listIsos(**args)
+ if isos:
+ for i in isos['iso']:
+ if iso in [ i['displaytext'], i['name'], i['id'] ]:
+ self.iso = i
+ return self._get_by_key(key, self.iso)
+ self.module.fail_json(msg="ISO '%s' not found" % iso)
+
+
+ def get_disk_offering_id(self):
+ disk_offering = self.module.params.get('disk_offering')
+
+ if not disk_offering:
+ return None
+
+ disk_offerings = self.cs.listDiskOfferings()
+ if disk_offerings:
+ for d in disk_offerings['diskoffering']:
+ if disk_offering in [ d['displaytext'], d['name'], d['id'] ]:
+ return d['id']
+ self.module.fail_json(msg="Disk offering '%s' not found" % disk_offering)
+
+
+ def get_instance(self):
+ instance = self.instance
+ if not instance:
+ instance_name = self.get_or_fallback('name', 'display_name')
+ vpc_id = self.get_vpc(key='id')
+ args = {
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ 'vpcid': vpc_id,
+ }
+ # Do not pass zoneid, as the instance name must be unique across zones.
+ instances = self.cs.listVirtualMachines(**args)
+ if instances:
+ for v in instances['virtualmachine']:
+ # Due the limitation of the API, there is no easy way (yet) to get only those VMs
+ # not belonging to a VPC.
+ if not vpc_id and self.is_vm_in_vpc(vm=v):
+ continue
+ if instance_name.lower() in [ v['name'].lower(), v['displayname'].lower(), v['id'] ]:
+ self.instance = v
+ break
+ return self.instance
+
+
+ def get_iptonetwork_mappings(self):
+ network_mappings = self.module.params.get('ip_to_networks')
+ if network_mappings is None:
+ return
+
+ if network_mappings and self.module.params.get('networks'):
+ self.module.fail_json(msg="networks and ip_to_networks are mutually exclusive.")
+
+ network_names = [n['network'] for n in network_mappings]
+ ids = self.get_network_ids(network_names)
+ res = []
+ for i, data in enumerate(network_mappings):
+ res.append({'networkid': ids[i], 'ip': data['ip']})
+ return res
+
+
+ def security_groups_has_changed(self):
+ security_groups = self.module.params.get('security_groups')
+ if security_groups is None:
+ return False
+
+ security_groups = [s.lower() for s in security_groups]
+ instance_security_groups = self.instance.get('securitygroup',[])
+
+ instance_security_group_names = []
+ for instance_security_group in instance_security_groups:
+ if instance_security_group['name'].lower() not in security_groups:
+ return True
+ else:
+ instance_security_group_names.append(instance_security_group['name'].lower())
+
+ for security_group in security_groups:
+ if security_group not in instance_security_group_names:
+ return True
+ return False
+
+
+ def get_network_ids(self, network_names=None):
+ if network_names is None:
+ network_names = self.module.params.get('networks')
+
+ if not network_names:
+ return None
+
+ args = {
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ 'zoneid': self.get_zone(key='id'),
+ 'vpcid': self.get_vpc(key='id'),
+ }
+ networks = self.cs.listNetworks(**args)
+ if not networks:
+ self.module.fail_json(msg="No networks available")
+
+ network_ids = []
+ network_displaytexts = []
+ for network_name in network_names:
+ for n in networks['network']:
+ if network_name in [ n['displaytext'], n['name'], n['id'] ]:
+ network_ids.append(n['id'])
+ network_displaytexts.append(n['name'])
+ break
+
+ if len(network_ids) != len(network_names):
+ self.module.fail_json(msg="Could not find all networks, networks list found: %s" % network_displaytexts)
+
+ return network_ids
+
+
+ def present_instance(self, start_vm=True):
+ instance = self.get_instance()
+
+ if not instance:
+ instance = self.deploy_instance(start_vm=start_vm)
+ else:
+ instance = self.recover_instance(instance=instance)
+ instance = self.update_instance(instance=instance, start_vm=start_vm)
+
+ # In check mode, we do not necessarely have an instance
+ if instance:
+ instance = self.ensure_tags(resource=instance, resource_type='UserVm')
+ # refresh instance data
+ self.instance = instance
+
+ return instance
+
+
+ def get_user_data(self):
+ user_data = self.module.params.get('user_data')
+ if user_data is not None:
+ user_data = base64.b64encode(str(user_data))
+ return user_data
+
+
+ def get_details(self):
+ res = None
+ cpu = self.module.params.get('cpu')
+ cpu_speed = self.module.params.get('cpu_speed')
+ memory = self.module.params.get('memory')
+ if all([cpu, cpu_speed, memory]):
+ res = [{
+ 'cpuNumber': cpu,
+ 'cpuSpeed': cpu_speed,
+ 'memory': memory,
+ }]
+ return res
+
+
+ def deploy_instance(self, start_vm=True):
+ self.result['changed'] = True
+ networkids = self.get_network_ids()
+ if networkids is not None:
+ networkids = ','.join(networkids)
+
+ args = {}
+ args['templateid'] = self.get_template_or_iso(key='id')
+ if not args['templateid']:
+ self.module.fail_json(msg="Template or ISO is required.")
+
+ args['zoneid'] = self.get_zone(key='id')
+ args['serviceofferingid'] = self.get_service_offering_id()
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['diskofferingid'] = self.get_disk_offering_id()
+ args['networkids'] = networkids
+ args['iptonetworklist'] = self.get_iptonetwork_mappings()
+ args['userdata'] = self.get_user_data()
+ args['keyboard'] = self.module.params.get('keyboard')
+ args['ipaddress'] = self.module.params.get('ip_address')
+ args['ip6address'] = self.module.params.get('ip6_address')
+ args['name'] = self.module.params.get('name')
+ args['displayname'] = self.get_or_fallback('display_name', 'name')
+ args['group'] = self.module.params.get('group')
+ args['keypair'] = self.module.params.get('ssh_key')
+ args['size'] = self.module.params.get('disk_size')
+ args['startvm'] = start_vm
+ args['rootdisksize'] = self.module.params.get('root_disk_size')
+ args['affinitygroupnames'] = ','.join(self.module.params.get('affinity_groups'))
+ args['details'] = self.get_details()
+
+ security_groups = self.module.params.get('security_groups')
+ if security_groups is not None:
+ args['securitygroupnames'] = ','.join(security_groups)
+
+ template_iso = self.get_template_or_iso()
+ if 'hypervisor' not in template_iso:
+ args['hypervisor'] = self.get_hypervisor()
+
+ instance = None
+ if not self.module.check_mode:
+ instance = self.cs.deployVirtualMachine(**args)
+
+ if 'errortext' in instance:
+ self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ instance = self.poll_job(instance, 'virtualmachine')
+ return instance
+
+
+ def update_instance(self, instance, start_vm=True):
+ # Service offering data
+ args_service_offering = {}
+ args_service_offering['id'] = instance['id']
+ if self.module.params.get('service_offering'):
+ args_service_offering['serviceofferingid'] = self.get_service_offering_id()
+ service_offering_changed = self.has_changed(args_service_offering, instance)
+
+ # Instance data
+ args_instance_update = {}
+ args_instance_update['id'] = instance['id']
+ args_instance_update['userdata'] = self.get_user_data()
+ args_instance_update['ostypeid'] = self.get_os_type(key='id')
+ if self.module.params.get('group'):
+ args_instance_update['group'] = self.module.params.get('group')
+ if self.module.params.get('display_name'):
+ args_instance_update['displayname'] = self.module.params.get('display_name')
+ instance_changed = self.has_changed(args_instance_update, instance)
+
+ # SSH key data
+ args_ssh_key = {}
+ args_ssh_key['id'] = instance['id']
+ args_ssh_key['projectid'] = self.get_project(key='id')
+ if self.module.params.get('ssh_key'):
+ args_ssh_key['keypair'] = self.module.params.get('ssh_key')
+ ssh_key_changed = self.has_changed(args_ssh_key, instance)
+
+ security_groups_changed = self.security_groups_has_changed()
+
+ changed = [
+ service_offering_changed,
+ instance_changed,
+ security_groups_changed,
+ ssh_key_changed,
+ ]
+
+ if True in changed:
+ force = self.module.params.get('force')
+ instance_state = instance['state'].lower()
+ if instance_state == 'stopped' or force:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+
+ # Ensure VM has stopped
+ instance = self.stop_instance()
+ instance = self.poll_job(instance, 'virtualmachine')
+ self.instance = instance
+
+ # Change service offering
+ if service_offering_changed:
+ res = self.cs.changeServiceForVirtualMachine(**args_service_offering)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ instance = res['virtualmachine']
+ self.instance = instance
+
+ # Update VM
+ if instance_changed or security_groups_changed:
+ if security_groups_changed:
+ args_instance_update['securitygroupnames'] = ','.join(self.module.params.get('security_groups'))
+ res = self.cs.updateVirtualMachine(**args_instance_update)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ instance = res['virtualmachine']
+ self.instance = instance
+
+ # Reset SSH key
+ if ssh_key_changed:
+ instance = self.cs.resetSSHKeyForVirtualMachine(**args_ssh_key)
+ if 'errortext' in instance:
+ self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+ instance = self.poll_job(instance, 'virtualmachine')
+ self.instance = instance
+
+ # Start VM again if it was running before
+ if instance_state == 'running' and start_vm:
+ instance = self.start_instance()
+ return instance
+
+
+ def recover_instance(self, instance):
+ if instance['state'].lower() in [ 'destroying', 'destroyed' ]:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.recoverVirtualMachine(id=instance['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ instance = res['virtualmachine']
+ return instance
+
+
+ def absent_instance(self):
+ instance = self.get_instance()
+ if instance:
+ if instance['state'].lower() not in ['expunging', 'destroying', 'destroyed']:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.destroyVirtualMachine(id=instance['id'])
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ instance = self.poll_job(res, 'virtualmachine')
+ return instance
+
+
+ def expunge_instance(self):
+ instance = self.get_instance()
+ if instance:
+ res = {}
+ if instance['state'].lower() in [ 'destroying', 'destroyed' ]:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.destroyVirtualMachine(id=instance['id'], expunge=True)
+
+ elif instance['state'].lower() not in [ 'expunging' ]:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.destroyVirtualMachine(id=instance['id'], expunge=True)
+
+ if res and 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ res = self.poll_job(res, 'virtualmachine')
+ return instance
+
+
+ def stop_instance(self):
+ instance = self.get_instance()
+ # in check mode intance may not be instanciated
+ if instance:
+ if instance['state'].lower() in ['stopping', 'stopped']:
+ return instance
+
+ if instance['state'].lower() in ['starting', 'running']:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ instance = self.cs.stopVirtualMachine(id=instance['id'])
+
+ if 'errortext' in instance:
+ self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ instance = self.poll_job(instance, 'virtualmachine')
+ return instance
+
+
+ def start_instance(self):
+ instance = self.get_instance()
+ # in check mode intance may not be instanciated
+ if instance:
+ if instance['state'].lower() in ['starting', 'running']:
+ return instance
+
+ if instance['state'].lower() in ['stopped', 'stopping']:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ instance = self.cs.startVirtualMachine(id=instance['id'])
+
+ if 'errortext' in instance:
+ self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ instance = self.poll_job(instance, 'virtualmachine')
+ return instance
+
+
+ def restart_instance(self):
+ instance = self.get_instance()
+ # in check mode intance may not be instanciated
+ if instance:
+ if instance['state'].lower() in [ 'running', 'starting' ]:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ instance = self.cs.rebootVirtualMachine(id=instance['id'])
+
+ if 'errortext' in instance:
+ self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ instance = self.poll_job(instance, 'virtualmachine')
+
+ elif instance['state'].lower() in [ 'stopping', 'stopped' ]:
+ instance = self.start_instance()
+ return instance
+
+
+ def restore_instance(self):
+ instance = self.get_instance()
+ self.result['changed'] = True
+ # in check mode intance may not be instanciated
+ if instance:
+ args = {}
+ args['templateid'] = self.get_template_or_iso(key='id')
+ args['virtualmachineid'] = instance['id']
+ res = self.cs.restoreVirtualMachine(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ instance = self.poll_job(res, 'virtualmachine')
+ return instance
+
+
+ def get_result(self, instance):
+ super(AnsibleCloudStackInstance, self).get_result(instance)
+ if instance:
+ if 'securitygroup' in instance:
+ security_groups = []
+ for securitygroup in instance['securitygroup']:
+ security_groups.append(securitygroup['name'])
+ self.result['security_groups'] = security_groups
+ if 'affinitygroup' in instance:
+ affinity_groups = []
+ for affinitygroup in instance['affinitygroup']:
+ affinity_groups.append(affinitygroup['name'])
+ self.result['affinity_groups'] = affinity_groups
+ if 'nic' in instance:
+ for nic in instance['nic']:
+ if nic['isdefault'] and 'ipaddress' in nic:
+ self.result['default_ip'] = nic['ipaddress']
+ return self.result
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(default=None),
+ display_name = dict(default=None),
+ group = dict(default=None),
+ state = dict(choices=['present', 'deployed', 'started', 'stopped', 'restarted', 'restored', 'absent', 'destroyed', 'expunged'], default='present'),
+ service_offering = dict(default=None),
+ cpu = dict(default=None, type='int'),
+ cpu_speed = dict(default=None, type='int'),
+ memory = dict(default=None, type='int'),
+ template = dict(default=None),
+ iso = dict(default=None),
+ template_filter = dict(default="executable", aliases=['iso_filter'], choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']),
+ networks = dict(type='list', aliases=[ 'network' ], default=None),
+ ip_to_networks = dict(type='list', aliases=['ip_to_network'], default=None),
+ ip_address = dict(defaul=None),
+ ip6_address = dict(defaul=None),
+ disk_offering = dict(default=None),
+ disk_size = dict(type='int', default=None),
+ root_disk_size = dict(type='int', default=None),
+ keyboard = dict(choices=['de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us'], default=None),
+ hypervisor = dict(choices=CS_HYPERVISORS, default=None),
+ security_groups = dict(type='list', aliases=[ 'security_group' ], default=None),
+ affinity_groups = dict(type='list', aliases=[ 'affinity_group' ], default=[]),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ user_data = dict(default=None),
+ zone = dict(default=None),
+ ssh_key = dict(default=None),
+ force = dict(type='bool', default=False),
+ tags = dict(type='list', aliases=[ 'tag' ], default=None),
+ vpc = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ required_together = cs_required_together()
+ required_together.extend([
+ ['cpu', 'cpu_speed', 'memory'],
+ ])
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=required_together,
+ required_one_of = (
+ ['display_name', 'name'],
+ ),
+ mutually_exclusive = (
+ ['template', 'iso'],
+ ),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_instance = AnsibleCloudStackInstance(module)
+
+ state = module.params.get('state')
+
+ if state in ['absent', 'destroyed']:
+ instance = acs_instance.absent_instance()
+
+ elif state in ['expunged']:
+ instance = acs_instance.expunge_instance()
+
+ elif state in ['restored']:
+ acs_instance.present_instance()
+ instance = acs_instance.restore_instance()
+
+ elif state in ['present', 'deployed']:
+ instance = acs_instance.present_instance()
+
+ elif state in ['stopped']:
+ acs_instance.present_instance(start_vm=False)
+ instance = acs_instance.stop_instance()
+
+ elif state in ['started']:
+ acs_instance.present_instance()
+ instance = acs_instance.start_instance()
+
+ elif state in ['restarted']:
+ acs_instance.present_instance()
+ instance = acs_instance.restart_instance()
+
+ if instance and 'state' in instance and instance['state'].lower() == 'error':
+ module.fail_json(msg="Instance named '%s' in error state." % module.params.get('name'))
+
+ result = acs_instance.get_result(instance)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_instance_facts.py b/lib/ansible/modules/cloud/cloudstack/cs_instance_facts.py
new file mode 100644
index 0000000000..2aee631395
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_instance_facts.py
@@ -0,0 +1,278 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_instance_facts
+short_description: Gathering facts from the API of instances from Apache CloudStack based clouds.
+description:
+ - Gathering facts from the API of an instance.
+version_added: "2.1"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name or display name of the instance.
+ required: true
+ domain:
+ description:
+ - Domain the instance is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the instance is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Project the instance is related to.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+- cs_instance_facts:
+ name: web-vm-1
+ delegate_to: localhost
+
+- debug:
+ var: cloudstack_instance
+'''
+
+RETURN = '''
+---
+cloudstack_instance.id:
+ description: UUID of the instance.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+cloudstack_instance.name:
+ description: Name of the instance.
+ returned: success
+ type: string
+ sample: web-01
+cloudstack_instance.display_name:
+ description: Display name of the instance.
+ returned: success
+ type: string
+ sample: web-01
+cloudstack_instance.group:
+  description: Group name the instance is related to.
+ returned: success
+ type: string
+ sample: web
+cloudstack_instance.created:
+  description: Date the instance was created.
+ returned: success
+ type: string
+ sample: 2014-12-01T14:57:57+0100
+cloudstack_instance.password_enabled:
+ description: True if password setting is enabled.
+ returned: success
+ type: boolean
+ sample: true
+cloudstack_instance.password:
+ description: The password of the instance if exists.
+ returned: success
+ type: string
+ sample: Ge2oe7Do
+cloudstack_instance.ssh_key:
+ description: Name of SSH key deployed to instance.
+ returned: success
+ type: string
+ sample: key@work
+cloudstack_instance.domain:
+ description: Domain the instance is related to.
+ returned: success
+ type: string
+ sample: example domain
+cloudstack_instance.account:
+ description: Account the instance is related to.
+ returned: success
+ type: string
+ sample: example account
+cloudstack_instance.project:
+ description: Name of project the instance is related to.
+ returned: success
+ type: string
+ sample: Production
+cloudstack_instance.default_ip:
+ description: Default IP address of the instance.
+ returned: success
+ type: string
+ sample: 10.23.37.42
+cloudstack_instance.public_ip:
+  description: Public IP address associated with the instance via a static NAT rule.
+ returned: success
+ type: string
+ sample: 1.2.3.4
+cloudstack_instance.iso:
+ description: Name of ISO the instance was deployed with.
+ returned: success
+ type: string
+ sample: Debian-8-64bit
+cloudstack_instance.template:
+ description: Name of template the instance was deployed with.
+ returned: success
+ type: string
+ sample: Debian-8-64bit
+cloudstack_instance.service_offering:
+ description: Name of the service offering the instance has.
+ returned: success
+ type: string
+ sample: 2cpu_2gb
+cloudstack_instance.zone:
+ description: Name of zone the instance is in.
+ returned: success
+ type: string
+ sample: ch-gva-2
+cloudstack_instance.state:
+ description: State of the instance.
+ returned: success
+ type: string
+ sample: Running
+cloudstack_instance.security_groups:
+ description: Security groups the instance is in.
+ returned: success
+ type: list
+ sample: '[ "default" ]'
+cloudstack_instance.affinity_groups:
+ description: Affinity groups the instance is in.
+ returned: success
+ type: list
+ sample: '[ "webservers" ]'
+cloudstack_instance.tags:
+ description: List of resource tags associated with the instance.
+ returned: success
+ type: dict
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+cloudstack_instance.hypervisor:
+ description: Hypervisor related to this instance.
+ returned: success
+ type: string
+ sample: KVM
+cloudstack_instance.instance_name:
+ description: Internal name of the instance (ROOT admin only).
+ returned: success
+ type: string
+ sample: i-44-3992-VM
+'''
+
+import base64
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackInstanceFacts(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackInstanceFacts, self).__init__(module)
+ self.instance = None
+ self.returns = {
+ 'group': 'group',
+ 'hypervisor': 'hypervisor',
+ 'instancename': 'instance_name',
+ 'publicip': 'public_ip',
+ 'passwordenabled': 'password_enabled',
+ 'password': 'password',
+ 'serviceofferingname': 'service_offering',
+ 'isoname': 'iso',
+ 'templatename': 'template',
+ 'keypair': 'ssh_key',
+ }
+ self.facts = {
+ 'cloudstack_instance': None,
+ }
+
+
+ def get_instance(self):
+ instance = self.instance
+ if not instance:
+ instance_name = self.module.params.get('name')
+
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ # Do not pass zoneid, as the instance name must be unique across zones.
+ instances = self.cs.listVirtualMachines(**args)
+ if instances:
+ for v in instances['virtualmachine']:
+ if instance_name.lower() in [ v['name'].lower(), v['displayname'].lower(), v['id'] ]:
+ self.instance = v
+ break
+ return self.instance
+
+
+ def run(self):
+ instance = self.get_instance()
+ if not instance:
+ self.module.fail_json(msg="Instance not found: %s" % self.module.params.get('name'))
+ self.facts['cloudstack_instance'] = self.get_result(instance)
+ return self.facts
+
+
+ def get_result(self, instance):
+ super(AnsibleCloudStackInstanceFacts, self).get_result(instance)
+ if instance:
+ if 'securitygroup' in instance:
+ security_groups = []
+ for securitygroup in instance['securitygroup']:
+ security_groups.append(securitygroup['name'])
+ self.result['security_groups'] = security_groups
+ if 'affinitygroup' in instance:
+ affinity_groups = []
+ for affinitygroup in instance['affinitygroup']:
+ affinity_groups.append(affinitygroup['name'])
+ self.result['affinity_groups'] = affinity_groups
+ if 'nic' in instance:
+ for nic in instance['nic']:
+ if nic['isdefault'] and 'ipaddress' in nic:
+ self.result['default_ip'] = nic['ipaddress']
+ return self.result
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False,
+ )
+
+ cs_instance_facts = AnsibleCloudStackInstanceFacts(module=module).run()
+ cs_facts_result = dict(changed=False, ansible_facts=cs_instance_facts)
+ module.exit_json(**cs_facts_result)
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_instancegroup.py b/lib/ansible/modules/cloud/cloudstack/cs_instancegroup.py
new file mode 100644
index 0000000000..12b2bc7bae
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_instancegroup.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_instancegroup
+short_description: Manages instance groups on Apache CloudStack based clouds.
+description:
+ - Create and remove instance groups.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the instance group.
+ required: true
+ domain:
+ description:
+ - Domain the instance group is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the instance group is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Project the instance group is related to.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the instance group.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create an instance group
+- local_action:
+ module: cs_instancegroup
+ name: loadbalancers
+
+# Remove an instance group
+- local_action:
+ module: cs_instancegroup
+ name: loadbalancers
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the instance group.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the instance group.
+ returned: success
+ type: string
+ sample: webservers
+created:
+ description: Date when the instance group was created.
+ returned: success
+ type: string
+ sample: 2015-05-03T15:05:51+0200
+domain:
+ description: Domain the instance group is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the instance group is related to.
+ returned: success
+ type: string
+ sample: example account
+project:
+ description: Project the instance group is related to.
+ returned: success
+ type: string
+ sample: example project
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackInstanceGroup(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackInstanceGroup, self).__init__(module)
+ self.instance_group = None
+
+
+ def get_instance_group(self):
+ if self.instance_group:
+ return self.instance_group
+
+ name = self.module.params.get('name')
+
+ args = {}
+ args['account'] = self.get_account('name')
+ args['domainid'] = self.get_domain('id')
+ args['projectid'] = self.get_project('id')
+
+ instance_groups = self.cs.listInstanceGroups(**args)
+ if instance_groups:
+ for g in instance_groups['instancegroup']:
+ if name in [ g['name'], g['id'] ]:
+ self.instance_group = g
+ break
+ return self.instance_group
+
+
+ def present_instance_group(self):
+ instance_group = self.get_instance_group()
+ if not instance_group:
+ self.result['changed'] = True
+
+ args = {}
+ args['name'] = self.module.params.get('name')
+ args['account'] = self.get_account('name')
+ args['domainid'] = self.get_domain('id')
+ args['projectid'] = self.get_project('id')
+
+ if not self.module.check_mode:
+ res = self.cs.createInstanceGroup(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ instance_group = res['instancegroup']
+ return instance_group
+
+
+ def absent_instance_group(self):
+ instance_group = self.get_instance_group()
+ if instance_group:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.deleteInstanceGroup(id=instance_group['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ return instance_group
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ state = dict(default='present', choices=['present', 'absent']),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_ig = AnsibleCloudStackInstanceGroup(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ instance_group = acs_ig.absent_instance_group()
+ else:
+ instance_group = acs_ig.present_instance_group()
+
+ result = acs_ig.get_result(instance_group)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_ip_address.py b/lib/ansible/modules/cloud/cloudstack/cs_ip_address.py
new file mode 100644
index 0000000000..233720827f
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_ip_address.py
@@ -0,0 +1,244 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Darren Worrall <darren@iweb.co.uk>
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_ip_address
+short_description: Manages public IP address associations on Apache CloudStack based clouds.
+description:
+ - Acquires and associates a public IP to an account or project. Due to API
+ limitations this is not an idempotent call, so be sure to only
+ conditionally call this when C(state=present)
+version_added: '2.0'
+author:
+ - "Darren Worrall (@dazworrall)"
+ - "René Moser (@resmo)"
+options:
+ ip_address:
+ description:
+ - Public IP address.
+ - Required if C(state=absent)
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain the IP address is related to.
+ required: false
+ default: null
+ network:
+ description:
+ - Network the IP address is related to.
+ required: false
+ default: null
+ vpc:
+ description:
+ - VPC the IP address is related to.
+ required: false
+ default: null
+ version_added: "2.2"
+ account:
+ description:
+ - Account the IP address is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the IP address is related to.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone in which the IP address is in.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Associate an IP address conditionally
+- local_action:
+ module: cs_ip_address
+ network: My Network
+ register: ip_address
+ when: instance.public_ip is undefined
+
+# Disassociate an IP address
+- local_action:
+ module: cs_ip_address
+ ip_address: 1.2.3.4
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the Public IP address.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+ip_address:
+ description: Public IP address.
+ returned: success
+ type: string
+ sample: 1.2.3.4
+zone:
+ description: Name of zone the IP address is related to.
+ returned: success
+ type: string
+ sample: ch-gva-2
+project:
+ description: Name of project the IP address is related to.
+ returned: success
+ type: string
+ sample: Production
+account:
+ description: Account the IP address is related to.
+ returned: success
+ type: string
+ sample: example account
+domain:
+ description: Domain the IP address is related to.
+ returned: success
+ type: string
+ sample: example domain
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackIPAddress(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackIPAddress, self).__init__(module)
+ self.returns = {
+ 'ipaddress': 'ip_address',
+ }
+
+ def get_ip_address(self, key=None):
+ if self.ip_address:
+ return self._get_by_key(key, self.ip_address)
+
+ ip_address = self.module.params.get('ip_address')
+ args = {
+ 'ipaddress': self.module.params.get('ip_address'),
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ 'vpcid': self.get_vpc(key='id'),
+ }
+ ip_addresses = self.cs.listPublicIpAddresses(**args)
+
+ if ip_addresses:
+ self.ip_address = ip_addresses['publicipaddress'][0]
+ return self._get_by_key(key, self.ip_address)
+
+ def associate_ip_address(self):
+ self.result['changed'] = True
+ args = {
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ 'networkid': self.get_network(key='id'),
+ 'zoneid': self.get_zone(key='id'),
+ }
+ ip_address = None
+ if not self.module.check_mode:
+ res = self.cs.associateIpAddress(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ ip_address = self.poll_job(res, 'ipaddress')
+ return ip_address
+
+ def disassociate_ip_address(self):
+ ip_address = self.get_ip_address()
+ if not ip_address:
+ return None
+ if ip_address['isstaticnat']:
+ self.module.fail_json(msg="IP address is allocated via static nat")
+
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.disassociateIpAddress(id=ip_address['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ self.poll_job(res, 'ipaddress')
+ return ip_address
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ ip_address = dict(required=False),
+ state = dict(choices=['present', 'absent'], default='present'),
+ vpc = dict(default=None),
+ network = dict(default=None),
+ zone = dict(default=None),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ required_if=[
+ ('state', 'absent', ['ip_address']),
+ ],
+ supports_check_mode=True
+ )
+
+ try:
+ acs_ip_address = AnsibleCloudStackIPAddress(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ ip_address = acs_ip_address.disassociate_ip_address()
+ else:
+ ip_address = acs_ip_address.associate_ip_address()
+
+ result = acs_ip_address.get_result(ip_address)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_iso.py b/lib/ansible/modules/cloud/cloudstack/cs_iso.py
new file mode 100644
index 0000000000..ee84bd22f2
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_iso.py
@@ -0,0 +1,339 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_iso
+short_description: Manages ISO images on Apache CloudStack based clouds.
+description:
+ - Register and remove ISO images.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the ISO.
+ required: true
+ url:
+ description:
+ - URL where the ISO can be downloaded from. Required if C(state) is present.
+ required: false
+ default: null
+ os_type:
+ description:
+ - Name of the OS that best represents the OS of this ISO. If the iso is bootable this parameter needs to be passed. Required if C(state) is present.
+ required: false
+ default: null
+ is_ready:
+ description:
+ - This flag is used for searching existing ISOs. If set to C(true), it will only list ISO ready for deployment e.g. successfully downloaded and installed. Recommended to set it to C(false).
+ required: false
+ default: false
+ aliases: []
+ is_public:
+ description:
+ - Register the ISO to be publicly available to all users. Only used if C(state) is present.
+ required: false
+ default: false
+ is_featured:
+ description:
+ - Register the ISO to be featured. Only used if C(state) is present.
+ required: false
+ default: false
+ is_dynamically_scalable:
+ description:
+      - Register the ISO having XS/VMWare tools installed in order to support dynamic scaling of VM cpu/memory. Only used if C(state) is present.
+ required: false
+ default: false
+ aliases: []
+ checksum:
+ description:
+ - The MD5 checksum value of this ISO. If set, we search by checksum instead of name.
+ required: false
+    default: null
+ bootable:
+ description:
+ - Register the ISO to be bootable. Only used if C(state) is present.
+ required: false
+ default: true
+ domain:
+ description:
+ - Domain the ISO is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the ISO is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the ISO to be registered in.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone you wish the ISO to be registered or deleted from. If not specified, first zone found will be used.
+ required: false
+ default: null
+ iso_filter:
+ description:
+ - Name of the filter used to search for the ISO.
+ required: false
+ default: 'self'
+ choices: [ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]
+ state:
+ description:
+ - State of the ISO.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Register an ISO if ISO name does not already exist.
+- local_action:
+ module: cs_iso
+ name: Debian 7 64-bit
+ url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
+ os_type: Debian GNU/Linux 7(64-bit)
+
+# Register an ISO with given name if ISO md5 checksum does not already exist.
+- local_action:
+ module: cs_iso
+ name: Debian 7 64-bit
+ url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
+ os_type: Debian GNU/Linux 7(64-bit)
+ checksum: 0b31bccccb048d20b551f70830bb7ad0
+
+# Remove an ISO by name
+- local_action:
+ module: cs_iso
+ name: Debian 7 64-bit
+ state: absent
+
+# Remove an ISO by checksum
+- local_action:
+ module: cs_iso
+ name: Debian 7 64-bit
+ checksum: 0b31bccccb048d20b551f70830bb7ad0
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the ISO.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+name:
+ description: Name of the ISO.
+ returned: success
+ type: string
+ sample: Debian 7 64-bit
+display_text:
+ description: Text to be displayed of the ISO.
+ returned: success
+ type: string
+ sample: Debian 7.7 64-bit minimal 2015-03-19
+zone:
+ description: Name of zone the ISO is registered in.
+ returned: success
+ type: string
+ sample: zuerich
+status:
+ description: Status of the ISO.
+ returned: success
+ type: string
+ sample: Successfully Installed
+is_ready:
+ description: True if the ISO is ready to be deployed from.
+ returned: success
+ type: boolean
+ sample: true
+checksum:
+ description: MD5 checksum of the ISO.
+ returned: success
+ type: string
+ sample: 0b31bccccb048d20b551f70830bb7ad0
+created:
+ description: Date of registering.
+ returned: success
+ type: string
+ sample: 2015-03-29T14:57:06+0200
+domain:
+ description: Domain the ISO is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the ISO is related to.
+ returned: success
+ type: string
+ sample: example account
+project:
+ description: Project the ISO is related to.
+ returned: success
+ type: string
+ sample: example project
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackIso(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackIso, self).__init__(module)
+ self.returns = {
+ 'checksum': 'checksum',
+ 'status': 'status',
+ 'isready': 'is_ready',
+ }
+ self.iso = None
+
+ def register_iso(self):
+ iso = self.get_iso()
+ if not iso:
+
+ args = {}
+ args['zoneid'] = self.get_zone('id')
+ args['domainid'] = self.get_domain('id')
+ args['account'] = self.get_account('name')
+ args['projectid'] = self.get_project('id')
+ args['bootable'] = self.module.params.get('bootable')
+ args['ostypeid'] = self.get_os_type('id')
+ args['name'] = self.module.params.get('name')
+ args['displaytext'] = self.module.params.get('name')
+ args['checksum'] = self.module.params.get('checksum')
+ args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable')
+ args['isfeatured'] = self.module.params.get('is_featured')
+ args['ispublic'] = self.module.params.get('is_public')
+
+ if args['bootable'] and not args['ostypeid']:
+ self.module.fail_json(msg="OS type 'os_type' is requried if 'bootable=true'.")
+
+ args['url'] = self.module.params.get('url')
+ if not args['url']:
+ self.module.fail_json(msg="URL is requried.")
+
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.registerIso(**args)
+ iso = res['iso'][0]
+ return iso
+
+
+ def get_iso(self):
+ if not self.iso:
+
+ args = {}
+ args['isready'] = self.module.params.get('is_ready')
+ args['isofilter'] = self.module.params.get('iso_filter')
+ args['domainid'] = self.get_domain('id')
+ args['account'] = self.get_account('name')
+ args['projectid'] = self.get_project('id')
+ args['zoneid'] = self.get_zone('id')
+
+ # if checksum is set, we only look on that.
+ checksum = self.module.params.get('checksum')
+ if not checksum:
+ args['name'] = self.module.params.get('name')
+
+ isos = self.cs.listIsos(**args)
+ if isos:
+ if not checksum:
+ self.iso = isos['iso'][0]
+ else:
+ for i in isos['iso']:
+ if i['checksum'] == checksum:
+ self.iso = i
+ break
+ return self.iso
+
+
+ def remove_iso(self):
+ iso = self.get_iso()
+ if iso:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = iso['id']
+ args['projectid'] = self.get_project('id')
+ args['zoneid'] = self.get_zone('id')
+
+ if not self.module.check_mode:
+ res = self.cs.deleteIso(**args)
+ return iso
+
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ url = dict(default=None),
+ os_type = dict(default=None),
+ zone = dict(default=None),
+ iso_filter = dict(default='self', choices=[ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ checksum = dict(default=None),
+ is_ready = dict(type='bool', default=False),
+ bootable = dict(type='bool', default=True),
+ is_featured = dict(type='bool', default=False),
+ is_dynamically_scalable = dict(type='bool', default=False),
+ state = dict(choices=['present', 'absent'], default='present'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_iso = AnsibleCloudStackIso(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ iso = acs_iso.remove_iso()
+ else:
+ iso = acs_iso.register_iso()
+
+ result = acs_iso.get_result(iso)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_loadbalancer_rule.py b/lib/ansible/modules/cloud/cloudstack/cs_loadbalancer_rule.py
new file mode 100644
index 0000000000..2e5f11e415
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_loadbalancer_rule.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Darren Worrall <darren@iweb.co.uk>
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_loadbalancer_rule
+short_description: Manages load balancer rules on Apache CloudStack based clouds.
+description:
+ - Add, update and remove load balancer rules.
+version_added: '2.0'
+author:
+ - "Darren Worrall (@dazworrall)"
+ - "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - The name of the load balancer rule.
+ required: true
+ description:
+ description:
+ - The description of the load balancer rule.
+ required: false
+ default: null
+ algorithm:
+ description:
+ - Load balancer algorithm
+ - Required when using C(state=present).
+ required: false
+ choices: [ 'source', 'roundrobin', 'leastconn' ]
+ default: 'source'
+ private_port:
+ description:
+ - The private port of the private ip address/virtual machine where the network traffic will be load balanced to.
+ - Required when using C(state=present).
+      - Can not be changed once the rule exists due to an API limitation.
+ required: false
+ default: null
+ public_port:
+ description:
+ - The public port from where the network traffic will be load balanced from.
+ - Required when using C(state=present).
+      - Can not be changed once the rule exists due to an API limitation.
+    required: false
+ default: null
+ ip_address:
+ description:
+ - Public IP address from where the network traffic will be load balanced from.
+ required: true
+ aliases: [ 'public_ip' ]
+ open_firewall:
+ description:
+ - Whether the firewall rule for public port should be created, while creating the new rule.
+ - Use M(cs_firewall) for managing firewall rules.
+ required: false
+ default: false
+ cidr:
+ description:
+ - CIDR (full notation) to be used for firewall rule if required.
+ required: false
+ default: null
+ protocol:
+ description:
+ - The protocol to be used on the load balancer
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the load balancer IP address is related to.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the rule.
+ required: true
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ domain:
+ description:
+ - Domain the rule is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the rule is related to.
+ required: false
+ default: null
+ zone:
+ description:
+      - Name of the zone in which the rule should be created.
+ - If not set, default zone is used.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create a load balancer rule
+- local_action:
+ module: cs_loadbalancer_rule
+ name: balance_http
+ public_ip: 1.2.3.4
+ algorithm: leastconn
+ public_port: 80
+ private_port: 8080
+
+# update algorithm of an existing load balancer rule
+- local_action:
+ module: cs_loadbalancer_rule
+ name: balance_http
+ public_ip: 1.2.3.4
+ algorithm: roundrobin
+ public_port: 80
+ private_port: 8080
+
+# Delete a load balancer rule
+- local_action:
+ module: cs_loadbalancer_rule
+ name: balance_http
+ public_ip: 1.2.3.4
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the rule.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+zone:
+ description: Name of zone the rule is related to.
+ returned: success
+ type: string
+ sample: ch-gva-2
+project:
+ description: Name of project the rule is related to.
+ returned: success
+ type: string
+ sample: Production
+account:
+ description: Account the rule is related to.
+ returned: success
+ type: string
+ sample: example account
+domain:
+ description: Domain the rule is related to.
+ returned: success
+ type: string
+ sample: example domain
+algorithm:
+ description: Load balancer algorithm used.
+ returned: success
+ type: string
+ sample: "source"
+cidr:
+ description: CIDR to forward traffic from.
+ returned: success
+ type: string
+ sample: ""
+name:
+ description: Name of the rule.
+ returned: success
+ type: string
+ sample: "http-lb"
+description:
+ description: Description of the rule.
+ returned: success
+ type: string
+ sample: "http load balancer rule"
+protocol:
+ description: Protocol of the rule.
+ returned: success
+ type: string
+ sample: "tcp"
+public_port:
+ description: Public port.
+ returned: success
+ type: string
+ sample: 80
+private_port:
+  description: Private port.
+ returned: success
+ type: string
+ sample: 80
+public_ip:
+ description: Public IP address.
+ returned: success
+ type: string
+ sample: "1.2.3.4"
+tags:
+ description: List of resource tags associated with the rule.
+ returned: success
+ type: dict
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+state:
+ description: State of the rule.
+ returned: success
+ type: string
+ sample: "Add"
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackLBRule(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackLBRule, self).__init__(module)
+ self.returns = {
+ 'publicip': 'public_ip',
+ 'algorithm': 'algorithm',
+ 'cidrlist': 'cidr',
+ 'protocol': 'protocol',
+ }
+ # these values will be casted to int
+ self.returns_to_int = {
+ 'publicport': 'public_port',
+ 'privateport': 'private_port',
+ }
+
+
+ def get_rule(self, **kwargs):
+ rules = self.cs.listLoadBalancerRules(**kwargs)
+ if rules:
+ return rules['loadbalancerrule'][0]
+
+
+ def _get_common_args(self):
+ return {
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ 'zoneid': self.get_zone(key='id'),
+ 'publicipid': self.get_ip_address(key='id'),
+ 'name': self.module.params.get('name'),
+ }
+
+
+ def present_lb_rule(self):
+ missing_params = []
+ for required_params in [
+ 'algorithm',
+ 'private_port',
+ 'public_port',
+ ]:
+ if not self.module.params.get(required_params):
+ missing_params.append(required_params)
+ if missing_params:
+ self.module.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
+
+ args = self._get_common_args()
+ rule = self.get_rule(**args)
+ if rule:
+ rule = self._update_lb_rule(rule)
+ else:
+ rule = self._create_lb_rule(rule)
+
+ if rule:
+ rule = self.ensure_tags(resource=rule, resource_type='LoadBalancer')
+ return rule
+
+
+ def _create_lb_rule(self, rule):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ args = self._get_common_args()
+ args['algorithm'] = self.module.params.get('algorithm')
+ args['privateport'] = self.module.params.get('private_port')
+ args['publicport'] = self.module.params.get('public_port')
+ args['cidrlist'] = self.module.params.get('cidr')
+ args['description'] = self.module.params.get('description')
+ args['protocol'] = self.module.params.get('protocol')
+ res = self.cs.createLoadBalancerRule(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ rule = self.poll_job(res, 'loadbalancer')
+ return rule
+
+
+ def _update_lb_rule(self, rule):
+ args = {}
+ args['id'] = rule['id']
+ args['algorithm'] = self.module.params.get('algorithm')
+ args['description'] = self.module.params.get('description')
+ if self.has_changed(args, rule):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.updateLoadBalancerRule(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ rule = self.poll_job(res, 'loadbalancer')
+ return rule
+
+
+ def absent_lb_rule(self):
+ args = self._get_common_args()
+ rule = self.get_rule(**args)
+ if rule:
+ self.result['changed'] = True
+ if rule and not self.module.check_mode:
+ res = self.cs.deleteLoadBalancerRule(id=rule['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ res = self.poll_job(res, 'loadbalancer')
+ return rule
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ description = dict(default=None),
+ algorithm = dict(choices=['source', 'roundrobin', 'leastconn'], default='source'),
+ private_port = dict(type='int', default=None),
+ public_port = dict(type='int', default=None),
+ protocol = dict(default=None),
+ state = dict(choices=['present', 'absent'], default='present'),
+ ip_address = dict(required=True, aliases=['public_ip']),
+ cidr = dict(default=None),
+ project = dict(default=None),
+ open_firewall = dict(type='bool', default=False),
+ tags = dict(type='list', aliases=['tag'], default=None),
+ zone = dict(default=None),
+ domain = dict(default=None),
+ account = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_lb_rule = AnsibleCloudStackLBRule(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ rule = acs_lb_rule.absent_lb_rule()
+ else:
+ rule = acs_lb_rule.present_lb_rule()
+
+ result = acs_lb_rule.get_result(rule)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_loadbalancer_rule_member.py b/lib/ansible/modules/cloud/cloudstack/cs_loadbalancer_rule_member.py
new file mode 100644
index 0000000000..0695ed9be5
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_loadbalancer_rule_member.py
@@ -0,0 +1,364 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Darren Worrall <darren@iweb.co.uk>
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_loadbalancer_rule_member
+short_description: Manages load balancer rule members on Apache CloudStack based clouds.
+description:
+ - Add and remove load balancer rule members.
+version_added: '2.0'
+author:
+ - "Darren Worrall (@dazworrall)"
+ - "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - The name of the load balancer rule.
+ required: true
+ ip_address:
+ description:
+ - Public IP address from where the network traffic will be load balanced from.
+ - Only needed to find the rule if C(name) is not unique.
+ required: false
+ default: null
+ aliases: [ 'public_ip' ]
+ vms:
+ description:
+ - List of VMs to assign to or remove from the rule.
+ required: true
+ type: list
+ aliases: [ 'vm' ]
+ state:
+ description:
+ - Should the VMs be present or absent from the rule.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ project:
+ description:
+ - Name of the project the firewall rule is related to.
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain the rule is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the rule is related to.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone in which the rule should be located.
+ - If not set, default zone is used.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Add VMs to an existing load balancer
+- local_action:
+ module: cs_loadbalancer_rule_member
+ name: balance_http
+ vms:
+ - web01
+ - web02
+
+# Remove a VM from an existing load balancer
+- local_action:
+ module: cs_loadbalancer_rule_member
+ name: balance_http
+ vms:
+ - web01
+ - web02
+ state: absent
+
+# Rolling upgrade of hosts
+- hosts: webservers
+ serial: 1
+ pre_tasks:
+ - name: Remove from load balancer
+ local_action:
+ module: cs_loadbalancer_rule_member
+ name: balance_http
+ vm: "{{ ansible_hostname }}"
+ state: absent
+ tasks:
+ # Perform update
+ post_tasks:
+ - name: Add to load balancer
+ local_action:
+ module: cs_loadbalancer_rule_member
+ name: balance_http
+ vm: "{{ ansible_hostname }}"
+ state: present
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the rule.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+zone:
+ description: Name of zone the rule is related to.
+ returned: success
+ type: string
+ sample: ch-gva-2
+project:
+ description: Name of project the rule is related to.
+ returned: success
+ type: string
+ sample: Production
+account:
+ description: Account the rule is related to.
+ returned: success
+ type: string
+ sample: example account
+domain:
+ description: Domain the rule is related to.
+ returned: success
+ type: string
+ sample: example domain
+algorithm:
+ description: Load balancer algorithm used.
+ returned: success
+ type: string
+ sample: "source"
+cidr:
+ description: CIDR to forward traffic from.
+ returned: success
+ type: string
+ sample: ""
+name:
+ description: Name of the rule.
+ returned: success
+ type: string
+ sample: "http-lb"
+description:
+ description: Description of the rule.
+ returned: success
+ type: string
+ sample: "http load balancer rule"
+protocol:
+ description: Protocol of the rule.
+ returned: success
+ type: string
+ sample: "tcp"
+public_port:
+ description: Public port.
+ returned: success
+ type: string
+ sample: 80
+private_port:
+  description: Private port.
+ returned: success
+ type: string
+ sample: 80
+public_ip:
+ description: Public IP address.
+ returned: success
+ type: string
+ sample: "1.2.3.4"
+vms:
+ description: Rule members.
+ returned: success
+ type: list
+ sample: '[ "web01", "web02" ]'
+tags:
+ description: List of resource tags associated with the rule.
+ returned: success
+ type: dict
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+state:
+ description: State of the rule.
+ returned: success
+ type: string
+ sample: "Add"
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackLBRuleMember(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackLBRuleMember, self).__init__(module)
+ self.returns = {
+ 'publicip': 'public_ip',
+ 'algorithm': 'algorithm',
+ 'cidrlist': 'cidr',
+ 'protocol': 'protocol',
+ }
+ # these values will be casted to int
+ self.returns_to_int = {
+ 'publicport': 'public_port',
+ 'privateport': 'private_port',
+ }
+
+
+ def get_rule(self):
+ args = self._get_common_args()
+ args['name'] = self.module.params.get('name')
+ args['zoneid'] = self.get_zone(key='id')
+ if self.module.params.get('ip_address'):
+ args['publicipid'] = self.get_ip_address(key='id')
+ rules = self.cs.listLoadBalancerRules(**args)
+ if rules:
+ if len(rules['loadbalancerrule']) > 1:
+ self.module.fail_json(msg="More than one rule having name %s. Please pass 'ip_address' as well." % args['name'])
+ return rules['loadbalancerrule'][0]
+ return None
+
+
+ def _get_common_args(self):
+ return {
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ }
+
+
+ def _get_members_of_rule(self, rule):
+ res = self.cs.listLoadBalancerRuleInstances(id=rule['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ return res.get('loadbalancerruleinstance', [])
+
+
+ def _ensure_members(self, operation):
+ if operation not in ['add', 'remove']:
+ self.module.fail_json(msg="Bad operation: %s" % operation)
+
+ rule = self.get_rule()
+ if not rule:
+ self.module.fail_json(msg="Unknown rule: %s" % self.module.params.get('name'))
+
+ existing = {}
+ for vm in self._get_members_of_rule(rule=rule):
+ existing[vm['name']] = vm['id']
+
+ wanted_names = self.module.params.get('vms')
+
+ if operation =='add':
+ cs_func = self.cs.assignToLoadBalancerRule
+ to_change = set(wanted_names) - set(existing.keys())
+ else:
+ cs_func = self.cs.removeFromLoadBalancerRule
+ to_change = set(wanted_names) & set(existing.keys())
+
+ if not to_change:
+ return rule
+
+ args = self._get_common_args()
+ vms = self.cs.listVirtualMachines(**args)
+ to_change_ids = []
+ for name in to_change:
+ for vm in vms.get('virtualmachine', []):
+ if vm['name'] == name:
+ to_change_ids.append(vm['id'])
+ break
+ else:
+ self.module.fail_json(msg="Unknown VM: %s" % name)
+
+ if to_change_ids:
+ self.result['changed'] = True
+
+ if to_change_ids and not self.module.check_mode:
+ res = cs_func(
+ id = rule['id'],
+ virtualmachineids = to_change_ids,
+ )
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ self.poll_job(res)
+ rule = self.get_rule()
+ return rule
+
+
+ def add_members(self):
+ return self._ensure_members('add')
+
+
+ def remove_members(self):
+ return self._ensure_members('remove')
+
+
+ def get_result(self, rule):
+ super(AnsibleCloudStackLBRuleMember, self).get_result(rule)
+ if rule:
+ self.result['vms'] = []
+ for vm in self._get_members_of_rule(rule=rule):
+ self.result['vms'].append(vm['name'])
+ return self.result
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ ip_address = dict(default=None, aliases=['public_ip']),
+ vms = dict(required=True, aliases=['vm'], type='list'),
+ state = dict(choices=['present', 'absent'], default='present'),
+ zone = dict(default=None),
+ domain = dict(default=None),
+ project = dict(default=None),
+ account = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_lb_rule_member = AnsibleCloudStackLBRuleMember(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ rule = acs_lb_rule_member.remove_members()
+ else:
+ rule = acs_lb_rule_member.add_members()
+
+ result = acs_lb_rule_member.get_result(rule)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_network.py b/lib/ansible/modules/cloud/cloudstack/cs_network.py
new file mode 100644
index 0000000000..092fbf7326
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_network.py
@@ -0,0 +1,564 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_network
+short_description: Manages networks on Apache CloudStack based clouds.
+description:
+ - Create, update, restart and delete networks.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name (case sensitive) of the network.
+ required: true
+ display_text:
+ description:
+ - Display text of the network.
+ - If not specified, C(name) will be used as C(display_text).
+ required: false
+ default: null
+ network_offering:
+ description:
+ - Name of the offering for the network.
+ - Required if C(state=present).
+ required: false
+ default: null
+ start_ip:
+ description:
+ - The beginning IPv4 address of the network belongs to.
+ - Only considered on create.
+ required: false
+ default: null
+ end_ip:
+ description:
+ - The ending IPv4 address of the network belongs to.
+ - If not specified, value of C(start_ip) is used.
+ - Only considered on create.
+ required: false
+ default: null
+ gateway:
+ description:
+ - The gateway of the network.
+ - Required for shared networks and isolated networks when it belongs to a VPC.
+ - Only considered on create.
+ required: false
+ default: null
+ netmask:
+ description:
+ - The netmask of the network.
+ - Required for shared networks and isolated networks when it belongs to a VPC.
+ - Only considered on create.
+ required: false
+ default: null
+ start_ipv6:
+ description:
+ - The beginning IPv6 address of the network belongs to.
+ - Only considered on create.
+ required: false
+ default: null
+ end_ipv6:
+ description:
+ - The ending IPv6 address of the network belongs to.
+ - If not specified, value of C(start_ipv6) is used.
+ - Only considered on create.
+ required: false
+ default: null
+ cidr_ipv6:
+ description:
+ - CIDR of IPv6 network, must be at least /64.
+ - Only considered on create.
+ required: false
+ default: null
+ gateway_ipv6:
+ description:
+ - The gateway of the IPv6 network.
+ - Required for shared networks.
+ - Only considered on create.
+ required: false
+ default: null
+ vlan:
+ description:
+ - The ID or VID of the network.
+ required: false
+ default: null
+ vpc:
+ description:
+ - Name of the VPC of the network.
+ required: false
+ default: null
+ isolated_pvlan:
+ description:
+ - The isolated private VLAN for this network.
+ required: false
+ default: null
+ clean_up:
+ description:
+ - Cleanup old network elements.
+ - Only considered on C(state=restarted).
+ required: false
+ default: false
+ acl_type:
+ description:
+ - Access control type.
+ - Only considered on create.
+ required: false
+ default: account
+ choices: [ 'account', 'domain' ]
+ network_domain:
+ description:
+ - The network domain.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the network.
+ required: false
+ default: present
+ choices: [ 'present', 'absent', 'restarted' ]
+ zone:
+ description:
+ - Name of the zone in which the network should be deployed.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the network to be deployed in.
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain the network is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the network is related to.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# create a network
+- local_action:
+ module: cs_network
+ name: my network
+ zone: gva-01
+ network_offering: DefaultIsolatedNetworkOfferingWithSourceNatService
+ network_domain: example.com
+
+# update a network
+- local_action:
+ module: cs_network
+ name: my network
+ display_text: network of domain example.local
+ network_domain: example.local
+
+# restart a network with clean up
+- local_action:
+ module: cs_network
+ name: my network
+ clean_up: yes
+  state: restarted
+
+# remove a network
+- local_action:
+ module: cs_network
+ name: my network
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the network.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the network.
+ returned: success
+ type: string
+ sample: web project
+display_text:
+ description: Display text of the network.
+ returned: success
+ type: string
+ sample: web project
+dns1:
+ description: IP address of the 1st nameserver.
+ returned: success
+ type: string
+ sample: 1.2.3.4
+dns2:
+ description: IP address of the 2nd nameserver.
+ returned: success
+ type: string
+ sample: 1.2.3.4
+cidr:
+ description: IPv4 network CIDR.
+ returned: success
+ type: string
+ sample: 10.101.64.0/24
+gateway:
+ description: IPv4 gateway.
+ returned: success
+ type: string
+ sample: 10.101.64.1
+netmask:
+ description: IPv4 netmask.
+ returned: success
+ type: string
+ sample: 255.255.255.0
+cidr_ipv6:
+ description: IPv6 network CIDR.
+ returned: success
+ type: string
+ sample: 2001:db8::/64
+gateway_ipv6:
+ description: IPv6 gateway.
+ returned: success
+ type: string
+ sample: 2001:db8::1
+state:
+ description: State of the network.
+ returned: success
+ type: string
+ sample: Implemented
+zone:
+ description: Name of zone.
+ returned: success
+ type: string
+ sample: ch-gva-2
+domain:
+ description: Domain the network is related to.
+ returned: success
+ type: string
+ sample: ROOT
+account:
+ description: Account the network is related to.
+ returned: success
+ type: string
+ sample: example account
+project:
+ description: Name of project.
+ returned: success
+ type: string
+ sample: Production
+tags:
+ description: List of resource tags associated with the network.
+ returned: success
+ type: dict
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+acl_type:
+ description: Access type of the network (Domain, Account).
+ returned: success
+ type: string
+ sample: Account
+broadcast_domain_type:
+ description: Broadcast domain type of the network.
+ returned: success
+ type: string
+ sample: Vlan
+type:
+ description: Type of the network.
+ returned: success
+ type: string
+ sample: Isolated
+traffic_type:
+ description: Traffic type of the network.
+ returned: success
+ type: string
+ sample: Guest
+state:
+ description: State of the network (Allocated, Implemented, Setup).
+ returned: success
+ type: string
+ sample: Allocated
+is_persistent:
+ description: Whether the network is persistent or not.
+ returned: success
+ type: boolean
+ sample: false
+network_domain:
+ description: The network domain
+ returned: success
+ type: string
+ sample: example.local
+network_offering:
+ description: The network offering name.
+ returned: success
+ type: string
+ sample: DefaultIsolatedNetworkOfferingWithSourceNatService
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackNetwork(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackNetwork, self).__init__(module)
+ self.returns = {
+            'networkdomain':        'network_domain',
+ 'networkofferingname': 'network_offering',
+ 'ispersistent': 'is_persistent',
+ 'acltype': 'acl_type',
+ 'type': 'type',
+ 'traffictype': 'traffic_type',
+ 'ip6gateway': 'gateway_ipv6',
+ 'ip6cidr': 'cidr_ipv6',
+ 'gateway': 'gateway',
+ 'cidr': 'cidr',
+ 'netmask': 'netmask',
+ 'broadcastdomaintype': 'broadcast_domain_type',
+ 'dns1': 'dns1',
+ 'dns2': 'dns2',
+ }
+ self.network = None
+
+
+ def get_network_offering(self, key=None):
+ network_offering = self.module.params.get('network_offering')
+ if not network_offering:
+ self.module.fail_json(msg="missing required arguments: network_offering")
+
+ args = {}
+ args['zoneid'] = self.get_zone(key='id')
+
+ network_offerings = self.cs.listNetworkOfferings(**args)
+ if network_offerings:
+ for no in network_offerings['networkoffering']:
+ if network_offering in [ no['name'], no['displaytext'], no['id'] ]:
+ return self._get_by_key(key, no)
+ self.module.fail_json(msg="Network offering '%s' not found" % network_offering)
+
+
+ def _get_args(self):
+ args = {}
+ args['name'] = self.module.params.get('name')
+ args['displaytext'] = self.get_or_fallback('display_text', 'name')
+ args['networkdomain'] = self.module.params.get('network_domain')
+ args['networkofferingid'] = self.get_network_offering(key='id')
+ return args
+
+
+ def get_network(self):
+ if not self.network:
+ network = self.module.params.get('name')
+
+ args = {}
+ args['zoneid'] = self.get_zone(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+
+ networks = self.cs.listNetworks(**args)
+ if networks:
+ for n in networks['network']:
+ if network in [ n['name'], n['displaytext'], n['id']]:
+ self.network = n
+ break
+ return self.network
+
+
+ def present_network(self):
+ network = self.get_network()
+ if not network:
+ network = self.create_network(network)
+ else:
+ network = self.update_network(network)
+ return network
+
+
+ def update_network(self, network):
+ args = self._get_args()
+ args['id'] = network['id']
+
+ if self.has_changed(args, network):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ network = self.cs.updateNetwork(**args)
+
+ if 'errortext' in network:
+ self.module.fail_json(msg="Failed: '%s'" % network['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if network and poll_async:
+ network = self.poll_job(network, 'network')
+ return network
+
+
+ def create_network(self, network):
+ self.result['changed'] = True
+
+ args = self._get_args()
+ args['acltype'] = self.module.params.get('acl_type')
+ args['zoneid'] = self.get_zone(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['startip'] = self.module.params.get('start_ip')
+ args['endip'] = self.get_or_fallback('end_ip', 'start_ip')
+ args['netmask'] = self.module.params.get('netmask')
+ args['gateway'] = self.module.params.get('gateway')
+ args['startipv6'] = self.module.params.get('start_ipv6')
+ args['endipv6'] = self.get_or_fallback('end_ipv6', 'start_ipv6')
+ args['ip6cidr'] = self.module.params.get('cidr_ipv6')
+ args['ip6gateway'] = self.module.params.get('gateway_ipv6')
+ args['vlan'] = self.module.params.get('vlan')
+ args['isolatedpvlan'] = self.module.params.get('isolated_pvlan')
+ args['subdomainaccess'] = self.module.params.get('subdomain_access')
+ args['vpcid'] = self.get_vpc(key='id')
+
+ if not self.module.check_mode:
+ res = self.cs.createNetwork(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ network = res['network']
+ return network
+
+
+ def restart_network(self):
+ network = self.get_network()
+
+ if not network:
+            self.module.fail_json(msg="No network named '%s' found." % self.module.params.get('name'))
+
+ # Restarting only available for these states
+ if network['state'].lower() in [ 'implemented', 'setup' ]:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = network['id']
+ args['cleanup'] = self.module.params.get('clean_up')
+
+ if not self.module.check_mode:
+ network = self.cs.restartNetwork(**args)
+
+ if 'errortext' in network:
+ self.module.fail_json(msg="Failed: '%s'" % network['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if network and poll_async:
+ network = self.poll_job(network, 'network')
+ return network
+
+
+ def absent_network(self):
+ network = self.get_network()
+ if network:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = network['id']
+
+ if not self.module.check_mode:
+ res = self.cs.deleteNetwork(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if res and poll_async:
+ res = self.poll_job(res, 'network')
+ return network
+
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ display_text = dict(default=None),
+ network_offering = dict(default=None),
+ zone = dict(default=None),
+ start_ip = dict(default=None),
+ end_ip = dict(default=None),
+ gateway = dict(default=None),
+ netmask = dict(default=None),
+ start_ipv6 = dict(default=None),
+ end_ipv6 = dict(default=None),
+ cidr_ipv6 = dict(default=None),
+ gateway_ipv6 = dict(default=None),
+ vlan = dict(default=None),
+ vpc = dict(default=None),
+ isolated_pvlan = dict(default=None),
+ clean_up = dict(type='bool', default=False),
+ network_domain = dict(default=None),
+ state = dict(choices=['present', 'absent', 'restarted' ], default='present'),
+ acl_type = dict(choices=['account', 'domain'], default='account'),
+ project = dict(default=None),
+ domain = dict(default=None),
+ account = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+ required_together = cs_required_together()
+ required_together.extend([
+ ['start_ip', 'netmask', 'gateway'],
+ ['start_ipv6', 'cidr_ipv6', 'gateway_ipv6'],
+ ])
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=required_together,
+ supports_check_mode=True
+ )
+
+ try:
+ acs_network = AnsibleCloudStackNetwork(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ network = acs_network.absent_network()
+
+ elif state in ['restarted']:
+ network = acs_network.restart_network()
+
+ else:
+ network = acs_network.present_network()
+
+ result = acs_network.get_result(network)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_nic.py b/lib/ansible/modules/cloud/cloudstack/cs_nic.py
new file mode 100644
index 0000000000..a9947c266e
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_nic.py
@@ -0,0 +1,297 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_nic
+short_description: Manages NICs and secondary IPs of an instance on Apache CloudStack based clouds.
+description:
+ - Add and remove secondary IPs to and from a NIC.
+version_added: "2.3"
+author: "René Moser (@resmo)"
+options:
+ vm:
+ description:
+ - Name of instance.
+ required: true
+ aliases: ['name']
+ network:
+ description:
+ - Name of the network.
+ - Required to find the NIC if instance has multiple networks assigned.
+ required: false
+ default: null
+ vm_guest_ip:
+ description:
+ - Secondary IP address to be added to the instance nic.
+      - If not set, the API always returns a new IP address and idempotency is not guaranteed.
+ required: false
+ default: null
+ aliases: ['secondary_ip']
+ vpc:
+ description:
+ - Name of the VPC the C(vm) is related to.
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain the instance is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the instance is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the instance is deployed in.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone in which the instance is deployed in.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the ipaddress.
+ required: false
+ default: "present"
+ choices: [ 'present', 'absent' ]
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Assign a specific IP to the default NIC of the VM
+- local_action:
+ module: cs_nic
+ vm: customer_xy
+ vm_guest_ip: 10.10.10.10
+
+# Assign an IP to the default NIC of the VM
+# Note: If vm_guest_ip is not set, you will get a new IP address on every run.
+- local_action:
+ module: cs_nic
+ vm: customer_xy
+
+# Remove a specific IP from the default NIC
+- local_action:
+ module: cs_nic
+ vm: customer_xy
+ vm_guest_ip: 10.10.10.10
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the nic.
+ returned: success
+ type: string
+ sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
+vm:
+ description: Name of the VM.
+ returned: success
+ type: string
+ sample: web-01
+ip_address:
+ description: Primary IP of the NIC.
+ returned: success
+ type: string
+ sample: 10.10.10.10
+netmask:
+ description: Netmask of the NIC.
+ returned: success
+ type: string
+ sample: 255.255.255.0
+mac_address:
+ description: MAC address of the NIC.
+ returned: success
+ type: string
+ sample: 02:00:33:31:00:e4
+vm_guest_ip:
+ description: Secondary IP of the NIC.
+ returned: success
+ type: string
+ sample: 10.10.10.10
+network:
+ description: Name of the network if not default.
+ returned: success
+ type: string
+ sample: sync network
+domain:
+ description: Domain the VM is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the VM is related to.
+ returned: success
+ type: string
+ sample: example account
+project:
+ description: Name of project the VM is related to.
+ returned: success
+ type: string
+ sample: Production
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackNic(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackNic, self).__init__(module)
+ self.vm_guest_ip = self.module.params.get('vm_guest_ip')
+ self.nic = None
+ self.returns = {
+ 'ipaddress': 'ip_address',
+ 'macaddress': 'mac_address',
+ 'netmask': 'netmask',
+ }
+
+ def get_nic(self):
+ if self.nic:
+ return self.nic
+ args = {
+ 'virtualmachineid': self.get_vm(key='id'),
+ 'networkdid': self.get_network(key='id'),
+ }
+ nics = self.cs.listNics(**args)
+ if nics:
+ self.nic = nics['nic'][0]
+ return self.nic
+ self.module.fail_json("NIC for VM %s in network %s not found" (self.get_vm(key='name'), self.get_network(key='name')))
+
+ def get_secondary_ip(self):
+ nic = self.get_nic()
+ if self.vm_guest_ip:
+ secondary_ips = nic.get('secondaryip') or []
+ for secondary_ip in secondary_ips:
+ if secondary_ip['ipaddress'] == self.vm_guest_ip:
+ return secondary_ip
+ return None
+
+ def present_nic(self):
+ nic = self.get_nic()
+ if not self.get_secondary_ip():
+ self.result['changed'] = True
+ args = {
+ 'nicid': nic['id'],
+ 'ipaddress': self.vm_guest_ip,
+ }
+
+ if not self.module.check_mode:
+ res = self.cs.addIpToNic(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ nic = self.poll_job(res, 'nicsecondaryip')
+ # Save result for RETURNS
+ self.vm_guest_ip = nic['ipaddress']
+ return nic
+
+ def absent_nic(self):
+ nic = self.get_nic()
+ secondary_ip = self.get_secondary_ip()
+ if secondary_ip:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.removeIpFromNic(id=secondary_ip['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % nic['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ self.poll_job(res, 'nicsecondaryip')
+ return nic
+
+ def get_result(self, nic):
+ super(AnsibleCloudStackNic, self).get_result(nic)
+ if nic and not self.module.params.get('network'):
+ self.module.params['network'] = nic.get('networkid')
+ self.result['network'] = self.get_network(key='name')
+ self.result['vm'] = self.get_vm(key='name')
+ self.result['vm_guest_ip'] = self.vm_guest_ip
+ self.result['domain'] = self.get_domain(key='path')
+ self.result['account'] = self.get_account(key='name')
+ self.result['project'] = self.get_project(key='name')
+ return self.result
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ vm=dict(required=True, aliases=['name']),
+ vm_guest_ip=dict(default=None, aliases=['secondary_ip']),
+ network=dict(default=None),
+ vpc=dict(default=None),
+ state=dict(choices=['present', 'absent'], default='present'),
+ domain=dict(default=None),
+ account=dict(default=None),
+ project=dict(default=None),
+ zone=dict(default=None),
+ poll_async=dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True,
+ required_if=([
+ ('state', 'absent', ['vm_guest_ip'])
+ ])
+ )
+
+ try:
+ acs_nic = AnsibleCloudStackNic(module)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ nic = acs_nic.absent_nic()
+ else:
+ nic = acs_nic.present_nic()
+
+ result = acs_nic.get_result(nic)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_pod.py b/lib/ansible/modules/cloud/cloudstack/cs_pod.py
new file mode 100644
index 0000000000..afccea1404
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_pod.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_pod
+short_description: Manages pods on Apache CloudStack based clouds.
+description:
+ - Create, update, delete pods.
+version_added: "2.1"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the pod.
+ required: true
+ id:
+ description:
+      - uuid of the existing pod.
+ default: null
+ required: false
+ start_ip:
+ description:
+ - Starting IP address for the Pod.
+ - Required on C(state=present)
+ default: null
+ required: false
+ end_ip:
+ description:
+ - Ending IP address for the Pod.
+ default: null
+ required: false
+ netmask:
+ description:
+ - Netmask for the Pod.
+ - Required on C(state=present)
+ default: null
+ required: false
+ gateway:
+ description:
+ - Gateway for the Pod.
+ - Required on C(state=present)
+ default: null
+ required: false
+ zone:
+ description:
+ - Name of the zone in which the pod belongs to.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the pod.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'enabled', 'disabled', 'absent' ]
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Ensure a pod is present
+- local_action:
+ module: cs_pod
+ name: pod1
+ zone: ch-zrh-ix-01
+ start_ip: 10.100.10.101
+ gateway: 10.100.10.1
+ netmask: 255.255.255.0
+
+# Ensure a pod is disabled
+- local_action:
+ module: cs_pod
+ name: pod1
+ zone: ch-zrh-ix-01
+ state: disabled
+
+# Ensure a pod is enabled
+- local_action:
+ module: cs_pod
+ name: pod1
+ zone: ch-zrh-ix-01
+ state: enabled
+
+# Ensure a pod is absent
+- local_action:
+ module: cs_pod
+ name: pod1
+ zone: ch-zrh-ix-01
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the pod.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the pod.
+ returned: success
+ type: string
+ sample: pod01
+start_ip:
+ description: Starting IP of the pod.
+ returned: success
+ type: string
+ sample: 10.100.1.101
+end_ip:
+ description: Ending IP of the pod.
+ returned: success
+ type: string
+ sample: 10.100.1.254
+netmask:
+ description: Netmask of the pod.
+ returned: success
+ type: string
+ sample: 255.255.255.0
+gateway:
+ description: Gateway of the pod.
+ returned: success
+ type: string
+ sample: 10.100.1.1
+allocation_state:
+ description: State of the pod.
+ returned: success
+ type: string
+ sample: Enabled
+zone:
+ description: Name of zone the pod is in.
+ returned: success
+ type: string
+ sample: ch-gva-2
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackPod(AnsibleCloudStack):
+    """Manage CloudStack pods: create, update (incl. enable/disable), delete."""
+
+    def __init__(self, module):
+        super(AnsibleCloudStackPod, self).__init__(module)
+        # Map CloudStack API result keys to the keys this module returns.
+        self.returns = {
+            'endip': 'end_ip',
+            'startip': 'start_ip',
+            'gateway': 'gateway',
+            'netmask': 'netmask',
+            'allocationstate': 'allocation_state',
+        }
+        # Cache of the looked-up pod to avoid repeated API calls.
+        self.pod = None
+
+
+    def _get_common_pod_args(self):
+        """Build the argument dict shared by createPod and updatePod."""
+        args = {}
+        args['name'] = self.module.params.get('name')
+        args['zoneid'] = self.get_zone(key='id')
+        args['startip'] = self.module.params.get('start_ip')
+        args['endip'] = self.module.params.get('end_ip')
+        args['netmask'] = self.module.params.get('netmask')
+        args['gateway'] = self.module.params.get('gateway')
+        state = self.module.params.get('state')
+        if state in [ 'enabled', 'disabled']:
+            # API expects 'Enabled'/'Disabled' while the module uses lowercase.
+            args['allocationstate'] = state.capitalize()
+        return args
+
+
+    def get_pod(self):
+        """Return the pod matched by id (preferred) or name, or None (cached)."""
+        if not self.pod:
+            args = {}
+
+            uuid = self.module.params.get('id')
+            if uuid:
+                args['id'] = uuid
+                args['zoneid'] = self.get_zone(key='id')
+                pods = self.cs.listPods(**args)
+                if pods:
+                    self.pod = pods['pod'][0]
+                    return self.pod
+
+            # NOTE(review): if the uuid lookup above found nothing, args still
+            # contains 'id', so this name lookup is also filtered by it —
+            # confirm this fall-through is intended.
+            args['name'] = self.module.params.get('name')
+            args['zoneid'] = self.get_zone(key='id')
+            pods = self.cs.listPods(**args)
+            if pods:
+                self.pod = pods['pod'][0]
+        return self.pod
+
+
+    def present_pod(self):
+        """Ensure the pod exists and matches the requested arguments."""
+        pod = self.get_pod()
+        if pod:
+            pod = self._update_pod()
+        else:
+            pod = self._create_pod()
+        return pod
+
+
+    def _create_pod(self):
+        """Create the pod; start_ip/netmask/gateway are mandatory here."""
+        required_params = [
+            'start_ip',
+            'netmask',
+            'gateway',
+        ]
+        self.module.fail_on_missing_params(required_params=required_params)
+
+        pod = None
+        self.result['changed'] = True
+        args = self._get_common_pod_args()
+        if not self.module.check_mode:
+            res = self.cs.createPod(**args)
+            if 'errortext' in res:
+                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+            pod = res['pod']
+        return pod
+
+
+    def _update_pod(self):
+        """Update the existing pod only when any common argument changed."""
+        pod = self.get_pod()
+        args = self._get_common_pod_args()
+        args['id'] = pod['id']
+
+        if self.has_changed(args, pod):
+            self.result['changed'] = True
+
+            if not self.module.check_mode:
+                res = self.cs.updatePod(**args)
+                if 'errortext' in res:
+                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+                pod = res['pod']
+        return pod
+
+
+    def absent_pod(self):
+        """Delete the pod if it exists; returns the (pre-delete) pod dict."""
+        pod = self.get_pod()
+        if pod:
+            self.result['changed'] = True
+
+            args = {}
+            args['id'] = pod['id']
+
+            if not self.module.check_mode:
+                res = self.cs.deletePod(**args)
+                if 'errortext' in res:
+                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+        return pod
+
+
+def main():
+    """Entry point of the cs_pod module."""
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        id = dict(default=None),
+        name = dict(required=True),
+        gateway = dict(default=None),
+        netmask = dict(default=None),
+        start_ip = dict(default=None),
+        end_ip = dict(default=None),
+        zone = dict(default=None),
+        state = dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=cs_required_together(),
+        supports_check_mode=True
+    )
+
+    try:
+        acs_pod = AnsibleCloudStackPod(module)
+        state = module.params.get('state')
+        # 'enabled'/'disabled' are handled inside present_pod via the
+        # allocationstate argument; only 'absent' deletes the pod.
+        if state in ['absent']:
+            pod = acs_pod.absent_pod()
+        else:
+            pod = acs_pod.present_pod()
+
+        result = acs_pod.get_result(pod)
+
+    except CloudStackException as e:
+        module.fail_json(msg='CloudStackException: %s' % str(e))
+
+    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_portforward.py b/lib/ansible/modules/cloud/cloudstack/cs_portforward.py
new file mode 100644
index 0000000000..139fa7773d
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_portforward.py
@@ -0,0 +1,387 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_portforward
+short_description: Manages port forwarding rules on Apache CloudStack based clouds.
+description:
+ - Create, update and remove port forwarding rules.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ ip_address:
+ description:
+ - Public IP address the rule is assigned to.
+ required: true
+ vm:
+ description:
+ - Name of virtual machine which we make the port forwarding rule for.
+ - Required if C(state=present).
+ required: false
+ default: null
+ state:
+ description:
+ - State of the port forwarding rule.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ protocol:
+ description:
+ - Protocol of the port forwarding rule.
+ required: false
+ default: 'tcp'
+ choices: [ 'tcp', 'udp' ]
+ public_port:
+ description:
+ - Start public port for this rule.
+ required: true
+ public_end_port:
+ description:
+ - End public port for this rule.
+ - If not specified equal C(public_port).
+ required: false
+ default: null
+ private_port:
+ description:
+ - Start private port for this rule.
+ required: true
+ private_end_port:
+ description:
+ - End private port for this rule.
+ - If not specified equal C(private_port).
+ required: false
+ default: null
+ open_firewall:
+ description:
+ - Whether the firewall rule for public port should be created, while creating the new rule.
+ - Use M(cs_firewall) for managing firewall rules.
+ required: false
+ default: false
+ vm_guest_ip:
+ description:
+ - VM guest NIC secondary IP address for the port forwarding rule.
+ required: false
+    default: null
+ domain:
+ description:
+ - Domain the C(vm) is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the C(vm) is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the C(vm) is located in.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone in which the virtual machine is in.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# 1.2.3.4:80 -> web01:8080
+- local_action:
+ module: cs_portforward
+ ip_address: 1.2.3.4
+ vm: web01
+ public_port: 80
+ private_port: 8080
+
+# forward SSH and open firewall
+- local_action:
+ module: cs_portforward
+ ip_address: '{{ public_ip }}'
+ vm: '{{ inventory_hostname }}'
+ public_port: '{{ ansible_ssh_port }}'
+ private_port: 22
+ open_firewall: true
+
+# forward DNS traffic, but do not open firewall
+- local_action:
+ module: cs_portforward
+ ip_address: 1.2.3.4
+ vm: '{{ inventory_hostname }}'
+ public_port: 53
+ private_port: 53
+ protocol: udp
+
+# remove ssh port forwarding
+- local_action:
+ module: cs_portforward
+ ip_address: 1.2.3.4
+ public_port: 22
+ private_port: 22
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the public IP address.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+ip_address:
+ description: Public IP address.
+ returned: success
+ type: string
+ sample: 1.2.3.4
+protocol:
+ description: Protocol.
+ returned: success
+ type: string
+ sample: tcp
+private_port:
+ description: Start port on the virtual machine's IP address.
+ returned: success
+ type: int
+ sample: 80
+private_end_port:
+ description: End port on the virtual machine's IP address.
+ returned: success
+ type: int
+public_port:
+ description: Start port on the public IP address.
+ returned: success
+ type: int
+ sample: 80
+public_end_port:
+ description: End port on the public IP address.
+ returned: success
+ type: int
+ sample: 80
+tags:
+ description: Tags related to the port forwarding.
+ returned: success
+ type: list
+ sample: []
+vm_name:
+ description: Name of the virtual machine.
+ returned: success
+ type: string
+ sample: web-01
+vm_display_name:
+ description: Display name of the virtual machine.
+ returned: success
+ type: string
+ sample: web-01
+vm_guest_ip:
+ description: IP of the virtual machine.
+ returned: success
+ type: string
+ sample: 10.101.65.152
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackPortforwarding, self).__init__(module)
+ self.returns = {
+ 'virtualmachinedisplayname': 'vm_display_name',
+ 'virtualmachinename': 'vm_name',
+ 'ipaddress': 'ip_address',
+ 'vmguestip': 'vm_guest_ip',
+ 'publicip': 'public_ip',
+ 'protocol': 'protocol',
+ }
+ # these values will be casted to int
+ self.returns_to_int = {
+ 'publicport': 'public_port',
+ 'publicendport': 'public_end_port',
+ 'privateport': 'private_port',
+ 'privateendport': 'private_end_port',
+ }
+ self.portforwarding_rule = None
+
+
+ def get_portforwarding_rule(self):
+ if not self.portforwarding_rule:
+ protocol = self.module.params.get('protocol')
+ public_port = self.module.params.get('public_port')
+ public_end_port = self.get_or_fallback('public_end_port', 'public_port')
+ private_port = self.module.params.get('private_port')
+ private_end_port = self.get_or_fallback('private_end_port', 'private_port')
+
+ args = {}
+ args['ipaddressid'] = self.get_ip_address(key='id')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ portforwarding_rules = self.cs.listPortForwardingRules(**args)
+
+ if portforwarding_rules and 'portforwardingrule' in portforwarding_rules:
+ for rule in portforwarding_rules['portforwardingrule']:
+ if protocol == rule['protocol'] \
+ and public_port == int(rule['publicport']):
+ self.portforwarding_rule = rule
+ break
+ return self.portforwarding_rule
+
+
+ def present_portforwarding_rule(self):
+ portforwarding_rule = self.get_portforwarding_rule()
+ if portforwarding_rule:
+ portforwarding_rule = self.update_portforwarding_rule(portforwarding_rule)
+ else:
+ portforwarding_rule = self.create_portforwarding_rule()
+ return portforwarding_rule
+
+
+ def create_portforwarding_rule(self):
+ args = {}
+ args['protocol'] = self.module.params.get('protocol')
+ args['publicport'] = self.module.params.get('public_port')
+ args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
+ args['privateport'] = self.module.params.get('private_port')
+ args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
+ args['openfirewall'] = self.module.params.get('open_firewall')
+ args['vmguestip'] = self.get_vm_guest_ip()
+ args['ipaddressid'] = self.get_ip_address(key='id')
+ args['virtualmachineid'] = self.get_vm(key='id')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+
+ portforwarding_rule = None
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ portforwarding_rule = self.cs.createPortForwardingRule(**args)
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
+ return portforwarding_rule
+
+
+ def update_portforwarding_rule(self, portforwarding_rule):
+ args = {}
+ args['protocol'] = self.module.params.get('protocol')
+ args['publicport'] = self.module.params.get('public_port')
+ args['publicendport'] = self.get_or_fallback('public_end_port', 'public_port')
+ args['privateport'] = self.module.params.get('private_port')
+ args['privateendport'] = self.get_or_fallback('private_end_port', 'private_port')
+ args['vmguestip'] = self.get_vm_guest_ip()
+ args['ipaddressid'] = self.get_ip_address(key='id')
+ args['virtualmachineid'] = self.get_vm(key='id')
+
+ if self.has_changed(args, portforwarding_rule):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ # API broken in 4.2.1?, workaround using remove/create instead of update
+ # portforwarding_rule = self.cs.updatePortForwardingRule(**args)
+ self.absent_portforwarding_rule()
+ portforwarding_rule = self.cs.createPortForwardingRule(**args)
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
+ return portforwarding_rule
+
+
+ def absent_portforwarding_rule(self):
+ portforwarding_rule = self.get_portforwarding_rule()
+
+ if portforwarding_rule:
+ self.result['changed'] = True
+ args = {}
+ args['id'] = portforwarding_rule['id']
+
+ if not self.module.check_mode:
+ res = self.cs.deletePortForwardingRule(**args)
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ self.poll_job(res, 'portforwardingrule')
+ return portforwarding_rule
+
+
+ def get_result(self, portforwarding_rule):
+ super(AnsibleCloudStackPortforwarding, self).get_result(portforwarding_rule)
+ if portforwarding_rule:
+ # Bad bad API does not always return int when it should.
+ for search_key, return_key in self.returns_to_int.iteritems():
+ if search_key in portforwarding_rule:
+ self.result[return_key] = int(portforwarding_rule[search_key])
+ return self.result
+
+
+def main():
+    """Entry point of the cs_portforward module."""
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        ip_address = dict(required=True),
+        protocol= dict(choices=['tcp', 'udp'], default='tcp'),
+        public_port = dict(type='int', required=True),
+        public_end_port = dict(type='int', default=None),
+        private_port = dict(type='int', required=True),
+        private_end_port = dict(type='int', default=None),
+        state = dict(choices=['present', 'absent'], default='present'),
+        open_firewall = dict(type='bool', default=False),
+        vm_guest_ip = dict(default=None),
+        vm = dict(default=None),
+        zone = dict(default=None),
+        domain = dict(default=None),
+        account = dict(default=None),
+        project = dict(default=None),
+        poll_async = dict(type='bool', default=True),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=cs_required_together(),
+        supports_check_mode=True
+    )
+
+    try:
+        acs_pf = AnsibleCloudStackPortforwarding(module)
+        state = module.params.get('state')
+        if state in ['absent']:
+            pf_rule = acs_pf.absent_portforwarding_rule()
+        else:
+            pf_rule = acs_pf.present_portforwarding_rule()
+
+        result = acs_pf.get_result(pf_rule)
+
+    except CloudStackException as e:
+        module.fail_json(msg='CloudStackException: %s' % str(e))
+
+    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_project.py b/lib/ansible/modules/cloud/cloudstack/cs_project.py
new file mode 100644
index 0000000000..472762b432
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_project.py
@@ -0,0 +1,311 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_project
+short_description: Manages projects on Apache CloudStack based clouds.
+description:
+ - Create, update, suspend, activate and remove projects.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the project.
+ required: true
+ display_text:
+ description:
+ - Display text of the project.
+ - If not specified, C(name) will be used as C(display_text).
+ required: false
+ default: null
+ state:
+ description:
+ - State of the project.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent', 'active', 'suspended' ]
+ domain:
+ description:
+ - Domain the project is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the project is related to.
+ required: false
+ default: null
+ tags:
+ description:
+ - List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
+ - "If you want to delete all tags, set a empty list e.g. C(tags: [])."
+ required: false
+ default: null
+ version_added: "2.2"
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create a project
+- local_action:
+ module: cs_project
+ name: web
+ tags:
+ - { key: admin, value: john }
+ - { key: foo, value: bar }
+
+# Rename a project
+- local_action:
+ module: cs_project
+ name: web
+ display_text: my web project
+
+# Suspend an existing project
+- local_action:
+ module: cs_project
+ name: web
+ state: suspended
+
+# Activate an existing project
+- local_action:
+ module: cs_project
+ name: web
+ state: active
+
+# Remove a project
+- local_action:
+ module: cs_project
+ name: web
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the project.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the project.
+ returned: success
+ type: string
+ sample: web project
+display_text:
+ description: Display text of the project.
+ returned: success
+ type: string
+ sample: web project
+state:
+ description: State of the project.
+ returned: success
+ type: string
+ sample: Active
+domain:
+ description: Domain the project is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the project is related to.
+ returned: success
+ type: string
+ sample: example account
+tags:
+ description: List of resource tags associated with the project.
+ returned: success
+ type: dict
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackProject(AnsibleCloudStack):
+    """Manage CloudStack projects: create, update, activate/suspend, delete."""
+
+
+    def get_project(self):
+        """Return the project matching the 'name' param (name or uuid), or None."""
+        if not self.project:
+            project = self.module.params.get('name')
+
+            args = {}
+            args['account'] = self.get_account(key='name')
+            args['domainid'] = self.get_domain(key='id')
+
+            projects = self.cs.listProjects(**args)
+            if projects:
+                for p in projects['project']:
+                    # The 'name' param may hold either the name or the uuid.
+                    if project.lower() in [ p['name'].lower(), p['id']]:
+                        self.project = p
+                        break
+        return self.project
+
+
+    def present_project(self):
+        """Ensure the project exists, is up to date and carries the tags."""
+        project = self.get_project()
+        if not project:
+            project = self.create_project(project)
+        else:
+            project = self.update_project(project)
+        if project:
+            project = self.ensure_tags(resource=project, resource_type='project')
+            # refresh resource
+            self.project = project
+        return project
+
+
+    def update_project(self, project):
+        """Update display_text if it changed; returns the (possibly new) project."""
+        args = {}
+        args['id'] = project['id']
+        # display_text falls back to name when not set.
+        args['displaytext'] = self.get_or_fallback('display_text', 'name')
+
+        if self.has_changed(args, project):
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                project = self.cs.updateProject(**args)
+
+                if 'errortext' in project:
+                    self.module.fail_json(msg="Failed: '%s'" % project['errortext'])
+
+                poll_async = self.module.params.get('poll_async')
+                if project and poll_async:
+                    project = self.poll_job(project, 'project')
+        return project
+
+
+    def create_project(self, project):
+        """Create the project; returns the new project (or None in check mode)."""
+        self.result['changed'] = True
+
+        args = {}
+        args['name'] = self.module.params.get('name')
+        args['displaytext'] = self.get_or_fallback('display_text', 'name')
+        args['account'] = self.get_account('name')
+        args['domainid'] = self.get_domain('id')
+
+        if not self.module.check_mode:
+            project = self.cs.createProject(**args)
+
+            if 'errortext' in project:
+                self.module.fail_json(msg="Failed: '%s'" % project['errortext'])
+
+            poll_async = self.module.params.get('poll_async')
+            if project and poll_async:
+                project = self.poll_job(project, 'project')
+        return project
+
+
+    def state_project(self, state='active'):
+        """Ensure the project exists and is in the given state ('active'/'suspended')."""
+        project = self.present_project()
+
+        if project['state'].lower() != state:
+            self.result['changed'] = True
+
+            args = {}
+            args['id'] = project['id']
+
+            if not self.module.check_mode:
+                if state == 'suspended':
+                    project = self.cs.suspendProject(**args)
+                else:
+                    project = self.cs.activateProject(**args)
+
+                if 'errortext' in project:
+                    self.module.fail_json(msg="Failed: '%s'" % project['errortext'])
+
+                poll_async = self.module.params.get('poll_async')
+                if project and poll_async:
+                    project = self.poll_job(project, 'project')
+        return project
+
+
+    def absent_project(self):
+        """Delete the project if it exists; returns the (pre-delete) project dict."""
+        project = self.get_project()
+        if project:
+            self.result['changed'] = True
+
+            args = {}
+            args['id'] = project['id']
+
+            if not self.module.check_mode:
+                res = self.cs.deleteProject(**args)
+
+                if 'errortext' in res:
+                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+                poll_async = self.module.params.get('poll_async')
+                if res and poll_async:
+                    res = self.poll_job(res, 'project')
+        return project
+
+
+
+def main():
+    """Entry point of the cs_project module."""
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        name = dict(required=True),
+        display_text = dict(default=None),
+        state = dict(choices=['present', 'absent', 'active', 'suspended' ], default='present'),
+        domain = dict(default=None),
+        account = dict(default=None),
+        poll_async = dict(type='bool', default=True),
+        tags=dict(type='list', aliases=['tag'], default=None),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=cs_required_together(),
+        supports_check_mode=True
+    )
+
+    try:
+        acs_project = AnsibleCloudStackProject(module)
+
+        state = module.params.get('state')
+        if state in ['absent']:
+            project = acs_project.absent_project()
+
+        elif state in ['active', 'suspended']:
+            project = acs_project.state_project(state=state)
+
+        else:
+            project = acs_project.present_project()
+
+        result = acs_project.get_result(project)
+
+    except CloudStackException as e:
+        module.fail_json(msg='CloudStackException: %s' % str(e))
+
+    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+    main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_region.py b/lib/ansible/modules/cloud/cloudstack/cs_region.py
new file mode 100644
index 0000000000..74e4c079fa
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_region.py
@@ -0,0 +1,208 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_region
+short_description: Manages regions on Apache CloudStack based clouds.
+description:
+ - Add, update and remove regions.
+version_added: "2.3"
+author: "René Moser (@resmo)"
+options:
+ id:
+ description:
+ - ID of the region.
+      - Must be a number (int).
+ required: true
+ name:
+ description:
+ - Name of the region.
+ - Required if C(state=present)
+ required: false
+ default: null
+ endpoint:
+ description:
+ - Endpoint URL of the region.
+ - Required if C(state=present)
+ required: false
+ default: null
+ state:
+ description:
+ - State of the region.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# create a region
+local_action:
+ module: cs_region
+ id: 2
+ name: geneva
+ endpoint: https://cloud.gva.example.com
+
+# remove a region with ID 2
+local_action:
+ module: cs_region
+ id: 2
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: ID of the region.
+ returned: success
+ type: int
+ sample: 1
+name:
+ description: Name of the region.
+ returned: success
+ type: string
+ sample: local
+endpoint:
+ description: Endpoint of the region.
+ returned: success
+ type: string
+ sample: http://cloud.example.com
+gslb_service_enabled:
+ description: Whether the GSLB service is enabled or not
+ returned: success
+ type: bool
+ sample: true
+portable_ip_service_enabled:
+ description: Whether the portable IP service is enabled or not
+ returned: success
+ type: bool
+ sample: true
+'''
+
+
+from ansible.module_utils.cloudstack import *
+from ansible.module_utils.basic import AnsibleModule
+
+class AnsibleCloudStackRegion(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackRegion, self).__init__(module)
+ self.returns = {
+ 'endpoint': 'endpoint',
+ 'gslbserviceenabled': 'gslb_service_enabled',
+ 'portableipserviceenabled': 'portable_ip_service_enabled',
+ }
+
+ def get_region(self):
+ id = self.module.params.get('id')
+ regions = self.cs.listRegions(id=id)
+ if regions:
+ return regions['region'][0]
+ return None
+
+ def present_region(self):
+ region = self.get_region()
+ if not region:
+ region = self._create_region(region=region)
+ else:
+ region = self._update_region(region=region)
+ return region
+
+ def _create_region(self, region):
+ self.result['changed'] = True
+ args = {
+ 'id': self.module.params.get('id'),
+ 'name': self.module.params.get('name'),
+ 'endpoint': self.module.params.get('endpoint')
+ }
+ if not self.module.check_mode:
+ res = self.cs.addRegion(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ region = res['region']
+ return region
+
+ def _update_region(self, region):
+ args = {
+ 'id': self.module.params.get('id'),
+ 'name': self.module.params.get('name'),
+ 'endpoint': self.module.params.get('endpoint')
+ }
+ if self.has_changed(args, region):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.updateRegion(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ region = res['region']
+ return region
+
+ def absent_region(self):
+ region = self.get_region()
+ if region:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.removeRegion(id=region['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ return region
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ id=dict(required=True, type='int'),
+ name=dict(default=None),
+ endpoint=dict(default=None),
+ state=dict(choices=['present', 'absent'], default='present'),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ required_if=[
+ ('state', 'present', ['name', 'endpoint']),
+ ],
+ supports_check_mode=True
+ )
+
+ try:
+ acs_region = AnsibleCloudStackRegion(module)
+
+ state = module.params.get('state')
+ if state == 'absent':
+ region = acs_region.absent_region()
+ else:
+ region = acs_region.present_region()
+
+ result = acs_region.get_result(region)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_resourcelimit.py b/lib/ansible/modules/cloud/cloudstack/cs_resourcelimit.py
new file mode 100644
index 0000000000..e5bfb7096e
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_resourcelimit.py
@@ -0,0 +1,220 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_resourcelimit
+short_description: Manages resource limits on Apache CloudStack based clouds.
+description:
+ - Manage limits of resources for domains, accounts and projects.
+version_added: "2.1"
+author: "René Moser (@resmo)"
+options:
+ resource_type:
+ description:
+ - Type of the resource.
+ required: true
+ choices:
+ - instance
+ - ip_address
+ - volume
+ - snapshot
+ - template
+ - network
+ - vpc
+ - cpu
+ - memory
+ - primary_storage
+ - secondary_storage
+ aliases: [ 'type' ]
+ limit:
+ description:
+ - Maximum number of the resource.
+ - Default is unlimited C(-1).
+ required: false
+ default: -1
+ aliases: [ 'max' ]
+ domain:
+ description:
+ - Domain the resource is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the resource is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the resource is related to.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Update a resource limit for instances of a domain
+local_action:
+ module: cs_resourcelimit
+ type: instance
+ limit: 10
+ domain: customers
+
+# Update a resource limit for instances of an account
+local_action:
+ module: cs_resourcelimit
+ type: instance
+ limit: 12
+ account: moserre
+ domain: customers
+'''
+
+RETURN = '''
+---
+resource_type:
+ description: Type of the resource
+ returned: success
+ type: string
+ sample: instance
+limit:
+ description: Maximum number of the resource.
+ returned: success
+ type: int
+ sample: -1
+domain:
+ description: Domain the resource is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the resource is related to.
+ returned: success
+ type: string
+ sample: example account
+project:
+ description: Project the resource is related to.
+ returned: success
+ type: string
+ sample: example project
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+RESOURCE_TYPES = {
+ 'instance': 0,
+ 'ip_address': 1,
+ 'volume': 2,
+ 'snapshot': 3,
+ 'template': 4,
+ 'network': 6,
+ 'vpc': 7,
+ 'cpu': 8,
+ 'memory': 9,
+ 'primary_storage': 10,
+ 'secondary_storage': 11,
+}
+
+class AnsibleCloudStackResourceLimit(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackResourceLimit, self).__init__(module)
+ self.returns = {
+ 'max': 'limit',
+ }
+
+
+ def get_resource_type(self):
+ resource_type = self.module.params.get('resource_type')
+ return RESOURCE_TYPES.get(resource_type)
+
+
+ def get_resource_limit(self):
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['resourcetype'] = self.get_resource_type()
+ resource_limit = self.cs.listResourceLimits(**args)
+ if resource_limit:
+ return resource_limit['resourcelimit'][0]
+ self.module.fail_json(msg="Resource limit type '%s' not found." % self.module.params.get('resource_type'))
+
+
+ def update_resource_limit(self):
+ resource_limit = self.get_resource_limit()
+
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['resourcetype'] = self.get_resource_type()
+ args['max'] = self.module.params.get('limit', -1)
+
+ if self.has_changed(args, resource_limit):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.updateResourceLimit(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ resource_limit = res['resourcelimit']
+ return resource_limit
+
+
+ def get_result(self, resource_limit):
+ self.result = super(AnsibleCloudStackResourceLimit, self).get_result(resource_limit)
+ self.result['resource_type'] = self.module.params.get('resource_type')
+ return self.result
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ resource_type = dict(required=True, choices=RESOURCE_TYPES.keys(), aliases=['type']),
+ limit = dict(default=-1, aliases=['max']),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_resource_limit = AnsibleCloudStackResourceLimit(module)
+ resource_limit = acs_resource_limit.update_resource_limit()
+ result = acs_resource_limit.get_result(resource_limit)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_router.py b/lib/ansible/modules/cloud/cloudstack/cs_router.py
new file mode 100644
index 0000000000..49a2dbe7b6
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_router.py
@@ -0,0 +1,378 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_router
+short_description: Manages routers on Apache CloudStack based clouds.
+description:
+ - Start, restart, stop and destroy routers.
+ - C(state=present) is not able to create routers, use M(cs_network) instead.
+version_added: "2.2"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the router.
+ required: true
+ service_offering:
+ description:
+ - Name or id of the service offering of the router.
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain the router is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the router is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the router is related to.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the router.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent', 'started', 'stopped', 'restarted' ]
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Ensure the router has the desired service offering, no matter if
+# the router is running or not.
+- local_action:
+ module: cs_router
+ name: r-40-VM
+ service_offering: System Offering for Software Router
+
+# Ensure started
+- local_action:
+ module: cs_router
+ name: r-40-VM
+ state: started
+
+# Ensure started with desired service offering.
+# If the service offerings changes, router will be rebooted.
+- local_action:
+ module: cs_router
+ name: r-40-VM
+ service_offering: System Offering for Software Router
+ state: started
+
+# Ensure stopped
+- local_action:
+ module: cs_router
+ name: r-40-VM
+ state: stopped
+
+# Remove a router
+- local_action:
+ module: cs_router
+ name: r-40-VM
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the router.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the router.
+ returned: success
+ type: string
+ sample: r-40-VM
+created:
+  description: Date the router was created.
+ returned: success
+ type: string
+ sample: 2014-12-01T14:57:57+0100
+template_version:
+ description: Version of the system VM template.
+ returned: success
+ type: string
+ sample: 4.5.1
+requires_upgrade:
+ description: Whether the router needs to be upgraded to the new template.
+ returned: success
+ type: bool
+ sample: false
+redundant_state:
+ description: Redundant state of the router.
+ returned: success
+ type: string
+ sample: UNKNOWN
+role:
+ description: Role of the router.
+ returned: success
+ type: string
+ sample: VIRTUAL_ROUTER
+zone:
+ description: Name of zone the router is in.
+ returned: success
+ type: string
+ sample: ch-gva-2
+service_offering:
+ description: Name of the service offering the router has.
+ returned: success
+ type: string
+ sample: System Offering For Software Router
+state:
+ description: State of the router.
+ returned: success
+ type: string
+ sample: Active
+domain:
+ description: Domain the router is related to.
+ returned: success
+ type: string
+ sample: ROOT
+account:
+ description: Account the router is related to.
+ returned: success
+ type: string
+ sample: admin
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackRouter(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackRouter, self).__init__(module)
+ self.returns = {
+ 'serviceofferingname': 'service_offering',
+ 'version': 'template_version',
+ 'requiresupgrade': 'requires_upgrade',
+ 'redundantstate': 'redundant_state',
+ 'role': 'role'
+ }
+ self.router = None
+
+
+ def get_service_offering_id(self):
+ service_offering = self.module.params.get('service_offering')
+ if not service_offering:
+ return None
+
+ args = {}
+ args['issystem'] = True
+
+ service_offerings = self.cs.listServiceOfferings(**args)
+ if service_offerings:
+ for s in service_offerings['serviceoffering']:
+ if service_offering in [ s['name'], s['id'] ]:
+ return s['id']
+ self.module.fail_json(msg="Service offering '%s' not found" % service_offering)
+
+ def get_router(self):
+ if not self.router:
+ router = self.module.params.get('name')
+
+ args = {}
+ args['projectid'] = self.get_project(key='id')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+
+ routers = self.cs.listRouters(**args)
+ if routers:
+ for r in routers['router']:
+ if router.lower() in [ r['name'].lower(), r['id']]:
+ self.router = r
+ break
+ return self.router
+
+ def start_router(self):
+ router = self.get_router()
+ if not router:
+ self.module.fail_json(msg="Router not found")
+
+ if router['state'].lower() != "running":
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = router['id']
+
+ if not self.module.check_mode:
+ res = self.cs.startRouter(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ router = self.poll_job(res, 'router')
+ return router
+
+ def stop_router(self):
+ router = self.get_router()
+ if not router:
+ self.module.fail_json(msg="Router not found")
+
+ if router['state'].lower() != "stopped":
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = router['id']
+
+ if not self.module.check_mode:
+ res = self.cs.stopRouter(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ router = self.poll_job(res, 'router')
+ return router
+
+ def reboot_router(self):
+ router = self.get_router()
+ if not router:
+ self.module.fail_json(msg="Router not found")
+
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = router['id']
+
+ if not self.module.check_mode:
+ res = self.cs.rebootRouter(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ router = self.poll_job(res, 'router')
+ return router
+
+ def absent_router(self):
+ router = self.get_router()
+ if router:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = router['id']
+
+ if not self.module.check_mode:
+ res = self.cs.destroyRouter(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ self.poll_job(res, 'router')
+ return router
+
+
+ def present_router(self):
+ router = self.get_router()
+ if not router:
+ self.module.fail_json(msg="Router can not be created using the API, see cs_network.")
+
+ args = {}
+ args['id'] = router['id']
+ args['serviceofferingid'] = self.get_service_offering_id()
+
+ state = self.module.params.get('state')
+
+ if self.has_changed(args, router):
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ current_state = router['state'].lower()
+
+ self.stop_router()
+ router = self.cs.changeServiceForRouter(**args)
+
+ if 'errortext' in router:
+                    self.module.fail_json(msg="Failed: '%s'" % router['errortext'])
+
+ if state in [ 'restarted', 'started' ]:
+ router = self.start_router()
+
+ # if state=present we get to the state before the service
+ # offering change.
+ elif state == "present" and current_state == "running":
+ router = self.start_router()
+
+ elif state == "started":
+ router = self.start_router()
+
+ elif state == "stopped":
+ router = self.stop_router()
+
+ elif state == "restarted":
+ router = self.reboot_router()
+
+ return router
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ service_offering = dict(default=None),
+ state = dict(choices=['present', 'started', 'stopped', 'restarted', 'absent'], default="present"),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_router = AnsibleCloudStackRouter(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ router = acs_router.absent_router()
+ else:
+ router = acs_router.present_router()
+
+ result = acs_router.get_result(router)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_securitygroup.py b/lib/ansible/modules/cloud/cloudstack/cs_securitygroup.py
new file mode 100644
index 0000000000..c65d63c8f4
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_securitygroup.py
@@ -0,0 +1,223 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_securitygroup
+short_description: Manages security groups on Apache CloudStack based clouds.
+description:
+ - Create and remove security groups.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the security group.
+ required: true
+ description:
+ description:
+ - Description of the security group.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the security group.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ domain:
+ description:
+ - Domain the security group is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the security group is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the security group to be created in.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create a security group
+- local_action:
+ module: cs_securitygroup
+ name: default
+ description: default security group
+
+# Remove a security group
+- local_action:
+ module: cs_securitygroup
+ name: default
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the security group.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+name:
+ description: Name of security group.
+ returned: success
+ type: string
+ sample: app
+description:
+ description: Description of security group.
+ returned: success
+ type: string
+ sample: application security group
+tags:
+ description: List of resource tags associated with the security group.
+ returned: success
+ type: dict
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+project:
+ description: Name of project the security group is related to.
+ returned: success
+ type: string
+ sample: Production
+domain:
+ description: Domain the security group is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the security group is related to.
+ returned: success
+ type: string
+ sample: example account
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackSecurityGroup(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackSecurityGroup, self).__init__(module)
+ self.security_group = None
+
+
+ def get_security_group(self):
+ if not self.security_group:
+
+ args = {}
+ args['projectid'] = self.get_project(key='id')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['securitygroupname'] = self.module.params.get('name')
+
+ sgs = self.cs.listSecurityGroups(**args)
+ if sgs:
+ self.security_group = sgs['securitygroup'][0]
+ return self.security_group
+
+
+ def create_security_group(self):
+ security_group = self.get_security_group()
+ if not security_group:
+ self.result['changed'] = True
+
+ args = {}
+ args['name'] = self.module.params.get('name')
+ args['projectid'] = self.get_project(key='id')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['description'] = self.module.params.get('description')
+
+ if not self.module.check_mode:
+ res = self.cs.createSecurityGroup(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ security_group = res['securitygroup']
+
+ return security_group
+
+
+ def remove_security_group(self):
+ security_group = self.get_security_group()
+ if security_group:
+ self.result['changed'] = True
+
+ args = {}
+ args['name'] = self.module.params.get('name')
+ args['projectid'] = self.get_project(key='id')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+
+ if not self.module.check_mode:
+ res = self.cs.deleteSecurityGroup(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ return security_group
+
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ description = dict(default=None),
+ state = dict(choices=['present', 'absent'], default='present'),
+ project = dict(default=None),
+ account = dict(default=None),
+ domain = dict(default=None),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_sg = AnsibleCloudStackSecurityGroup(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ sg = acs_sg.remove_security_group()
+ else:
+ sg = acs_sg.create_security_group()
+
+ result = acs_sg.get_result(sg)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_securitygroup_rule.py b/lib/ansible/modules/cloud/cloudstack/cs_securitygroup_rule.py
new file mode 100644
index 0000000000..85617b5baa
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_securitygroup_rule.py
@@ -0,0 +1,425 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_securitygroup_rule
+short_description: Manages security group rules on Apache CloudStack based clouds.
+description:
+ - Add and remove security group rules.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ security_group:
+ description:
+ - Name of the security group the rule is related to. The security group must be existing.
+ required: true
+ state:
+ description:
+ - State of the security group rule.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ protocol:
+ description:
+ - Protocol of the security group rule.
+ required: false
+ default: 'tcp'
+ choices: [ 'tcp', 'udp', 'icmp', 'ah', 'esp', 'gre' ]
+ type:
+ description:
+ - Ingress or egress security group rule.
+ required: false
+ default: 'ingress'
+ choices: [ 'ingress', 'egress' ]
+ cidr:
+ description:
+ - CIDR (full notation) to be used for security group rule.
+ required: false
+ default: '0.0.0.0/0'
+ user_security_group:
+ description:
+ - Security group this rule is based of.
+ required: false
+ default: null
+ start_port:
+ description:
+ - Start port for this rule. Required if C(protocol=tcp) or C(protocol=udp).
+ required: false
+ default: null
+ aliases: [ 'port' ]
+ end_port:
+ description:
+ - End port for this rule. Required if C(protocol=tcp) or C(protocol=udp), but C(start_port) will be used if not set.
+ required: false
+ default: null
+ icmp_type:
+ description:
+ - Type of the icmp message being sent. Required if C(protocol=icmp).
+ required: false
+ default: null
+ icmp_code:
+ description:
+ - Error code for this icmp message. Required if C(protocol=icmp).
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the security group to be created in.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+---
+# Allow inbound port 80/tcp from 1.2.3.4 added to security group 'default'
+- local_action:
+ module: cs_securitygroup_rule
+ security_group: default
+ port: 80
+ cidr: 1.2.3.4/32
+
+# Allow tcp/udp outbound added to security group 'default'
+- local_action:
+ module: cs_securitygroup_rule
+ security_group: default
+ type: egress
+ start_port: 1
+ end_port: 65535
+ protocol: '{{ item }}'
+ with_items:
+ - tcp
+ - udp
+
+# Allow inbound icmp from 0.0.0.0/0 added to security group 'default'
+- local_action:
+ module: cs_securitygroup_rule
+ security_group: default
+ protocol: icmp
+ icmp_code: -1
+ icmp_type: -1
+
+# Remove rule inbound port 80/tcp from 0.0.0.0/0 from security group 'default'
+- local_action:
+ module: cs_securitygroup_rule
+ security_group: default
+ port: 80
+ state: absent
+
+# Allow inbound port 80/tcp from security group web added to security group 'default'
+- local_action:
+ module: cs_securitygroup_rule
+ security_group: default
+ port: 80
+ user_security_group: web
+'''
+
+RETURN = '''
+---
+id:
+  description: UUID of the rule.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+security_group:
+ description: security group of the rule.
+ returned: success
+ type: string
+ sample: default
+type:
+ description: type of the rule.
+ returned: success
+ type: string
+ sample: ingress
+cidr:
+ description: CIDR of the rule.
+ returned: success and cidr is defined
+ type: string
+ sample: 0.0.0.0/0
+user_security_group:
+ description: user security group of the rule.
+ returned: success and user_security_group is defined
+ type: string
+ sample: default
+protocol:
+ description: protocol of the rule.
+ returned: success
+ type: string
+ sample: tcp
+start_port:
+ description: start port of the rule.
+ returned: success
+ type: int
+ sample: 80
+end_port:
+ description: end port of the rule.
+ returned: success
+ type: int
+ sample: 80
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackSecurityGroupRule(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackSecurityGroupRule, self).__init__(module)
+ self.returns = {
+ 'icmptype': 'icmp_type',
+ 'icmpcode': 'icmp_code',
+ 'endport': 'end_port',
+ 'startport': 'start_port',
+ 'protocol': 'protocol',
+ 'cidr': 'cidr',
+ 'securitygroupname': 'user_security_group',
+ }
+
+
+ def _tcp_udp_match(self, rule, protocol, start_port, end_port):
+ return protocol in ['tcp', 'udp'] \
+ and protocol == rule['protocol'] \
+ and start_port == int(rule['startport']) \
+ and end_port == int(rule['endport'])
+
+
+ def _icmp_match(self, rule, protocol, icmp_code, icmp_type):
+ return protocol == 'icmp' \
+ and protocol == rule['protocol'] \
+ and icmp_code == int(rule['icmpcode']) \
+ and icmp_type == int(rule['icmptype'])
+
+
+ def _ah_esp_gre_match(self, rule, protocol):
+ return protocol in ['ah', 'esp', 'gre'] \
+ and protocol == rule['protocol']
+
+
+ def _type_security_group_match(self, rule, security_group_name):
+ return security_group_name \
+ and 'securitygroupname' in rule \
+ and security_group_name == rule['securitygroupname']
+
+
+ def _type_cidr_match(self, rule, cidr):
+ return 'cidr' in rule \
+ and cidr == rule['cidr']
+
+
+ def _get_rule(self, rules):
+ user_security_group_name = self.module.params.get('user_security_group')
+ cidr = self.module.params.get('cidr')
+ protocol = self.module.params.get('protocol')
+ start_port = self.module.params.get('start_port')
+ end_port = self.get_or_fallback('end_port', 'start_port')
+ icmp_code = self.module.params.get('icmp_code')
+ icmp_type = self.module.params.get('icmp_type')
+
+ if protocol in ['tcp', 'udp'] and not (start_port and end_port):
+ self.module.fail_json(msg="no start_port or end_port set for protocol '%s'" % protocol)
+
+ if protocol == 'icmp' and not (icmp_type and icmp_code):
+ self.module.fail_json(msg="no icmp_type or icmp_code set for protocol '%s'" % protocol)
+
+ for rule in rules:
+ if user_security_group_name:
+ type_match = self._type_security_group_match(rule, user_security_group_name)
+ else:
+ type_match = self._type_cidr_match(rule, cidr)
+
+ protocol_match = ( self._tcp_udp_match(rule, protocol, start_port, end_port) \
+ or self._icmp_match(rule, protocol, icmp_code, icmp_type) \
+ or self._ah_esp_gre_match(rule, protocol)
+ )
+
+ if type_match and protocol_match:
+ return rule
+ return None
+
+
+ def get_security_group(self, security_group_name=None):
+ if not security_group_name:
+ security_group_name = self.module.params.get('security_group')
+ args = {}
+ args['securitygroupname'] = security_group_name
+ args['projectid'] = self.get_project('id')
+ sgs = self.cs.listSecurityGroups(**args)
+ if not sgs or 'securitygroup' not in sgs:
+ self.module.fail_json(msg="security group '%s' not found" % security_group_name)
+ return sgs['securitygroup'][0]
+
+
+ def add_rule(self):
+ security_group = self.get_security_group()
+
+ args = {}
+ user_security_group_name = self.module.params.get('user_security_group')
+
+ # the user_security_group and cidr are mutually_exclusive, but cidr is defaulted to 0.0.0.0/0.
+ # that is why we ignore if we have a user_security_group.
+ if user_security_group_name:
+ args['usersecuritygrouplist'] = []
+ user_security_group = self.get_security_group(user_security_group_name)
+ args['usersecuritygrouplist'].append({
+ 'group': user_security_group['name'],
+ 'account': user_security_group['account'],
+ })
+ else:
+ args['cidrlist'] = self.module.params.get('cidr')
+
+ args['protocol'] = self.module.params.get('protocol')
+ args['startport'] = self.module.params.get('start_port')
+ args['endport'] = self.get_or_fallback('end_port', 'start_port')
+ args['icmptype'] = self.module.params.get('icmp_type')
+ args['icmpcode'] = self.module.params.get('icmp_code')
+ args['projectid'] = self.get_project('id')
+ args['securitygroupid'] = security_group['id']
+
+ rule = None
+ res = None
+ sg_type = self.module.params.get('type')
+ if sg_type == 'ingress':
+ if 'ingressrule' in security_group:
+ rule = self._get_rule(security_group['ingressrule'])
+ if not rule:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.authorizeSecurityGroupIngress(**args)
+
+ elif sg_type == 'egress':
+ if 'egressrule' in security_group:
+ rule = self._get_rule(security_group['egressrule'])
+ if not rule:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.authorizeSecurityGroupEgress(**args)
+
+ if res and 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if res and poll_async:
+ security_group = self.poll_job(res, 'securitygroup')
+ key = sg_type + "rule" # ingressrule / egressrule
+ if key in security_group:
+ rule = security_group[key][0]
+ return rule
+
+
+ def remove_rule(self):
+ security_group = self.get_security_group()
+ rule = None
+ res = None
+ sg_type = self.module.params.get('type')
+ if sg_type == 'ingress':
+ rule = self._get_rule(security_group['ingressrule'])
+ if rule:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.revokeSecurityGroupIngress(id=rule['ruleid'])
+
+ elif sg_type == 'egress':
+ rule = self._get_rule(security_group['egressrule'])
+ if rule:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.revokeSecurityGroupEgress(id=rule['ruleid'])
+
+ if res and 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if res and poll_async:
+ res = self.poll_job(res, 'securitygroup')
+ return rule
+
+
+ def get_result(self, security_group_rule):
+ super(AnsibleCloudStackSecurityGroupRule, self).get_result(security_group_rule)
+ self.result['type'] = self.module.params.get('type')
+ self.result['security_group'] = self.module.params.get('security_group')
+ return self.result
+
+
+
+def main():
+    """Module entry point: build the arg spec and dispatch on state."""
+    # Start from the common CloudStack argument spec and add module args.
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        security_group = dict(required=True),
+        type = dict(choices=['ingress', 'egress'], default='ingress'),
+        cidr = dict(default='0.0.0.0/0'),
+        user_security_group = dict(default=None),
+        protocol = dict(choices=['tcp', 'udp', 'icmp', 'ah', 'esp', 'gre'], default='tcp'),
+        icmp_type = dict(type='int', default=None),
+        icmp_code = dict(type='int', default=None),
+        start_port = dict(type='int', default=None, aliases=['port']),
+        end_port = dict(type='int', default=None),
+        state = dict(choices=['present', 'absent'], default='present'),
+        project = dict(default=None),
+        poll_async = dict(type='bool', default=True),
+    ))
+    # icmp_type and icmp_code only make sense together.
+    required_together = cs_required_together()
+    required_together.extend([
+        ['icmp_type', 'icmp_code'],
+    ])
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=required_together,
+        # A rule is either port-based (tcp/udp) or icmp-based, never both.
+        mutually_exclusive = (
+            ['icmp_type', 'start_port'],
+            ['icmp_type', 'end_port'],
+            ['icmp_code', 'start_port'],
+            ['icmp_code', 'end_port'],
+        ),
+        supports_check_mode=True
+    )
+
+    try:
+        acs_sg_rule = AnsibleCloudStackSecurityGroupRule(module)
+
+        # state=absent removes the rule; any other (present) adds it.
+        state = module.params.get('state')
+        if state in ['absent']:
+            sg_rule = acs_sg_rule.remove_rule()
+        else:
+            sg_rule = acs_sg_rule.add_rule()
+
+        result = acs_sg_rule.get_result(sg_rule)
+
+    except CloudStackException as e:
+        module.fail_json(msg='CloudStackException: %s' % str(e))
+
+    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_snapshot_policy.py b/lib/ansible/modules/cloud/cloudstack/cs_snapshot_policy.py
new file mode 100644
index 0000000000..157d05e803
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_snapshot_policy.py
@@ -0,0 +1,387 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_snapshot_policy
+short_description: Manages volume snapshot policies on Apache CloudStack based clouds.
+description:
+ - Create, update and delete volume snapshot policies.
+version_added: '2.2'
+author: "René Moser (@resmo)"
+options:
+ volume:
+ description:
+ - Name of the volume.
+ - Either C(volume) or C(vm) is required.
+ required: false
+ default: null
+ volume_type:
+ description:
+ - Type of the volume.
+ required: false
+ default: null
+ choices:
+ - DATADISK
+ - ROOT
+ version_added: "2.3"
+ vm:
+ description:
+ - Name of the instance to select the volume from.
+ - Use C(volume_type) if VM has a DATADISK and ROOT volume.
+ - In case of C(volume_type=DATADISK), additionally use C(device_id) if VM has more than one DATADISK volume.
+ - Either C(volume) or C(vm) is required.
+ required: false
+ default: null
+ version_added: "2.3"
+ device_id:
+ description:
+ - ID of the device on a VM the volume is attached to.
+ - This will only be considered if VM has multiple DATADISK volumes.
+ required: false
+ default: null
+ version_added: "2.3"
+ vpc:
+ description:
+ - Name of the vpc the instance is deployed in.
+ required: false
+ default: null
+ version_added: "2.3"
+ interval_type:
+ description:
+ - Interval of the snapshot.
+ required: false
+ default: 'daily'
+ choices: [ 'hourly', 'daily', 'weekly', 'monthly' ]
+ aliases: [ 'interval' ]
+ max_snaps:
+ description:
+ - Max number of snapshots.
+ required: false
+ default: 8
+ aliases: [ 'max' ]
+ schedule:
+ description:
+ - Time the snapshot is scheduled. Required if C(state=present).
+ - 'Format for C(interval_type=HOURLY): C(MM)'
+ - 'Format for C(interval_type=DAILY): C(MM:HH)'
+ - 'Format for C(interval_type=WEEKLY): C(MM:HH:DD (1-7))'
+ - 'Format for C(interval_type=MONTHLY): C(MM:HH:DD (1-28))'
+ required: false
+ default: null
+ time_zone:
+ description:
+ - Specifies a timezone for this command.
+ required: false
+ default: 'UTC'
+ aliases: [ 'timezone' ]
+ state:
+ description:
+ - State of the snapshot policy.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ domain:
+ description:
+ - Domain the volume is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the volume is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the volume is related to.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Ensure a snapshot policy daily at 1h00 UTC
+- local_action:
+ module: cs_snapshot_policy
+ volume: ROOT-478
+ schedule: '00:1'
+ max_snaps: 3
+
+# Ensure a snapshot policy daily at 1h00 UTC on the second DATADISK of VM web-01
+- local_action:
+ module: cs_snapshot_policy
+ vm: web-01
+ volume_type: DATADISK
+ device_id: 2
+ schedule: '00:1'
+ max_snaps: 3
+
+# Ensure a snapshot policy hourly at minute 5 UTC
+- local_action:
+ module: cs_snapshot_policy
+ volume: ROOT-478
+ schedule: '5'
+ interval_type: hourly
+ max_snaps: 1
+
+# Ensure a snapshot policy weekly on Sunday at 05h00, TZ Europe/Zurich
+- local_action:
+ module: cs_snapshot_policy
+ volume: ROOT-478
+ schedule: '00:5:1'
+ interval_type: weekly
+ max_snaps: 1
+ time_zone: 'Europe/Zurich'
+
+# Ensure a snapshot policy is absent
+- local_action:
+ module: cs_snapshot_policy
+ volume: ROOT-478
+ interval_type: hourly
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the snapshot policy.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+interval_type:
+ description: interval type of the snapshot policy.
+ returned: success
+ type: string
+ sample: daily
+schedule:
+ description: schedule of the snapshot policy.
+ returned: success
+ type: string
+  sample: '00:1'
+max_snaps:
+ description: maximum number of snapshots retained.
+ returned: success
+ type: int
+ sample: 10
+time_zone:
+ description: the time zone of the snapshot policy.
+ returned: success
+ type: string
+ sample: Etc/UTC
+volume:
+ description: the volume of the snapshot policy.
+ returned: success
+ type: string
+  sample: ROOT-478
+zone:
+ description: Name of zone the volume is related to.
+ returned: success
+ type: string
+ sample: ch-gva-2
+project:
+ description: Name of project the volume is related to.
+ returned: success
+ type: string
+ sample: Production
+account:
+ description: Account the volume is related to.
+ returned: success
+ type: string
+ sample: example account
+domain:
+ description: Domain the volume is related to.
+ returned: success
+ type: string
+ sample: example domain
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackSnapshotPolicy(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackSnapshotPolicy, self).__init__(module)
+ self.returns = {
+ 'schedule': 'schedule',
+ 'timezone': 'time_zone',
+ 'maxsnaps': 'max_snaps',
+ }
+ self.interval_types = {
+ 'hourly': 0,
+ 'daily': 1,
+ 'weekly': 2,
+ 'monthly': 3,
+ }
+ self.volume = None
+
+ def get_interval_type(self):
+ interval_type = self.module.params.get('interval_type')
+ return self.interval_types[interval_type]
+
+ def get_volume(self, key=None):
+ if self.volume:
+ return self._get_by_key(key, self.volume)
+
+ args = {
+ 'name': self.module.params.get('volume'),
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ 'virtualmachineid': self.get_vm(key='id'),
+ 'type': self.module.params.get('volume_type'),
+ }
+ volumes = self.cs.listVolumes(**args)
+ if volumes:
+ if volumes['count'] > 1:
+ device_id = self.module.params.get('device_id')
+ if not device_id:
+ self.module.fail_json(msg="Found more then 1 volume: combine params 'vm', 'volume_type', 'device_id' and/or 'volume' to select the volume")
+ else:
+ for v in volumes['volume']:
+ if v.get('deviceid') == device_id:
+ self.volume = v
+ return self._get_by_key(key, self.volume)
+ self.module.fail_json(msg="No volume found with device id %s" % device_id)
+ self.volume = volumes['volume'][0]
+ return self._get_by_key(key, self.volume)
+ return None
+
+ def get_snapshot_policy(self):
+ args = {
+ 'volumeid': self.get_volume(key='id')
+ }
+ policies = self.cs.listSnapshotPolicies(**args)
+ if policies:
+ for policy in policies['snapshotpolicy']:
+ if policy['intervaltype'] == self.get_interval_type():
+ return policy
+ return None
+
+ def present_snapshot_policy(self):
+ required_params = [
+ 'schedule',
+ ]
+ self.module.fail_on_missing_params(required_params=required_params)
+
+ policy = self.get_snapshot_policy()
+ args = {
+ 'id': policy.get('id') if policy else None,
+ 'intervaltype': self.module.params.get('interval_type'),
+ 'schedule': self.module.params.get('schedule'),
+ 'maxsnaps': self.module.params.get('max_snaps'),
+ 'timezone': self.module.params.get('time_zone'),
+ 'volumeid': self.get_volume(key='id')
+ }
+ if not policy or (policy and self.has_changed(policy, args, only_keys=['schedule', 'maxsnaps', 'timezone'])):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.createSnapshotPolicy(**args)
+ policy = res['snapshotpolicy']
+ if 'errortext' in policy:
+ self.module.fail_json(msg="Failed: '%s'" % policy['errortext'])
+ return policy
+
+ def absent_snapshot_policy(self):
+ policy = self.get_snapshot_policy()
+ if policy:
+ self.result['changed'] = True
+ args = {
+ 'id': policy['id']
+ }
+ if not self.module.check_mode:
+ res = self.cs.deleteSnapshotPolicies(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % policy['errortext'])
+ return policy
+
+ def get_result(self, policy):
+ super(AnsibleCloudStackSnapshotPolicy, self).get_result(policy)
+ if policy and 'intervaltype' in policy:
+ for key, value in self.interval_types.items():
+ if value == policy['intervaltype']:
+ self.result['interval_type'] = key
+ break
+ volume = self.get_volume()
+ if volume:
+ volume_results = {
+ 'volume': volume.get('name'),
+ 'zone': volume.get('zonename'),
+ 'project': volume.get('project'),
+ 'account': volume.get('account'),
+ 'domain': volume.get('domain'),
+ }
+ self.result.update(volume_results)
+ return self.result
+
+
+def main():
+    """Module entry point: build the arg spec and dispatch on state."""
+    # Start from the common CloudStack argument spec and add module args.
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        volume=dict(default=None),
+        volume_type=dict(choices=['DATADISK', 'ROOT'], default=None),
+        vm=dict(default=None),
+        device_id=dict(type='int', default=None),
+        vpc=dict(default=None),
+        interval_type=dict(default='daily', choices=['hourly', 'daily', 'weekly', 'monthly'], aliases=['interval']),
+        schedule=dict(default=None),
+        time_zone=dict(default='UTC', aliases=['timezone']),
+        max_snaps=dict(type='int', default=8, aliases=['max']),
+        state=dict(choices=['present', 'absent'], default='present'),
+        domain=dict(default=None),
+        account=dict(default=None),
+        project=dict(default=None),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=cs_required_together(),
+        # The volume can be selected either directly or via its VM.
+        required_one_of = (
+            ['vm', 'volume'],
+        ),
+        supports_check_mode=True
+    )
+
+    try:
+        acs_snapshot_policy = AnsibleCloudStackSnapshotPolicy(module)
+
+        # state=absent removes the policy; any other (present) ensures it.
+        state = module.params.get('state')
+        if state in ['absent']:
+            policy = acs_snapshot_policy.absent_snapshot_policy()
+        else:
+            policy = acs_snapshot_policy.present_snapshot_policy()
+
+        result = acs_snapshot_policy.get_result(policy)
+
+    except CloudStackException as e:
+        module.fail_json(msg='CloudStackException: %s' % str(e))
+
+    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_sshkeypair.py b/lib/ansible/modules/cloud/cloudstack/cs_sshkeypair.py
new file mode 100644
index 0000000000..2724c58c71
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_sshkeypair.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_sshkeypair
+short_description: Manages SSH keys on Apache CloudStack based clouds.
+description:
+ - Create, register and remove SSH keys.
+  - If no key was found and no public key was provided, a new SSH
+    private/public key pair will be created and the private key will be returned.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of public key.
+ required: true
+ domain:
+ description:
+ - Domain the public key is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the public key is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the public key to be registered in.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the public key.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ public_key:
+ description:
+ - String of the public key.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# create a new private / public key pair:
+- cs_sshkeypair:
+ name: linus@example.com
+ delegate_to: localhost
+ register: key
+- debug:
+ msg: 'Private key is {{ key.private_key }}'
+
+# remove a public key by its name:
+- cs_sshkeypair:
+ name: linus@example.com
+ state: absent
+ delegate_to: localhost
+
+# register your existing local public key:
+- cs_sshkeypair:
+ name: linus@example.com
+    public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
+ delegate_to: localhost
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the SSH public key.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+name:
+ description: Name of the SSH public key.
+ returned: success
+ type: string
+ sample: linus@example.com
+fingerprint:
+ description: Fingerprint of the SSH public key.
+ returned: success
+ type: string
+ sample: "86:5e:a3:e8:bd:95:7b:07:7c:c2:5c:f7:ad:8b:09:28"
+private_key:
+ description: Private key of generated SSH keypair.
+ returned: changed
+ type: string
+ sample: "-----BEGIN RSA PRIVATE KEY-----\nMIICXQIBAAKBgQCkeFYjI+4k8bWfIRMzp4pCzhlopNydbbwRu824P5ilD4ATWMUG\nvEtuCQ2Mp5k5Bma30CdYHgh2/SbxC5RxXSUKTUJtTKpoJUy8PAhb1nn9dnfkC2oU\naRVi9NRUgypTIZxMpgooHOxvAzWxbZCyh1W+91Ld3FNaGxTLqTgeevY84wIDAQAB\nAoGAcwQwgLyUwsNB1vmjWwE0QEmvHS4FlhZyahhi4hGfZvbzAxSWHIK7YUT1c8KU\n9XsThEIN8aJ3GvcoL3OAqNKRnoNb14neejVHkYRadhxqc0GVN6AUIyCqoEMpvhFI\nQrinM572ORzv5ffRjCTbvZcYlW+sqFKNo5e8pYIB8TigpFECQQDu7bg9vkvg8xPs\nkP1K+EH0vsR6vUfy+m3euXjnbJtiP7RoTkZk0JQMOmexgy1qQhISWT0e451wd62v\nJ7M0trl5AkEAsDivJnMIlCCCypwPN4tdNUYpe9dtidR1zLmb3SA7wXk5xMUgLZI9\ncWPjBCMt0KKShdDhQ+hjXAyKQLF7iAPuOwJABjdHCMwvmy2XwhrPjCjDRoPEBtFv\n0sFzJE08+QBZVogDwIbwy+SlRWArnHGmN9J6N+H8dhZD3U4vxZPJ1MBAOQJBAJxO\nCv1dt1Q76gbwmYa49LnWO+F+2cgRTVODpr5iYt5fOmBQQRRqzFkRMkFvOqn+KVzM\nQ6LKM6dn8BEl295vLhUCQQCVDWzoSk3GjL3sOjfAUTyAj8VAXM69llaptxWWySPM\nE9pA+8rYmHfohYFx7FD5/KWCO+sfmxTNB48X0uwyE8tO\n-----END RSA PRIVATE KEY-----\n"
+'''
+
+try:
+ import sshpubkeys
+ has_lib_sshpubkeys = True
+except ImportError:
+ has_lib_sshpubkeys = False
+
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackSshKey(AnsibleCloudStack):
+    """Manage SSH key pairs: create, register and remove them."""
+
+    def __init__(self, module):
+        super(AnsibleCloudStackSshKey, self).__init__(module)
+        # Mapping of CloudStack API result keys -> keys returned by this module.
+        self.returns = {
+            'privatekey': 'private_key',
+            'fingerprint': 'fingerprint',
+        }
+        # Cache for the resolved SSH key dict.
+        self.ssh_key = None
+
+
+    def register_ssh_key(self, public_key):
+        """Register the given public key, replacing an existing key whose
+        fingerprint differs. Returns the resulting key pair dict.
+
+        NOTE(review): API responses are not checked for 'errortext' here;
+        a failed register silently returns the stale key — confirm intended.
+        """
+        ssh_key = self.get_ssh_key()
+        args = {}
+        args['domainid'] = self.get_domain('id')
+        args['account'] = self.get_account('name')
+        args['projectid'] = self.get_project('id')
+        args['name'] = self.module.params.get('name')
+
+        res = None
+        if not ssh_key:
+            self.result['changed'] = True
+            args['publickey'] = public_key
+            if not self.module.check_mode:
+                res = self.cs.registerSSHKeyPair(**args)
+
+        else:
+            # Key exists: re-register only if the fingerprint changed.
+            fingerprint = self._get_ssh_fingerprint(public_key)
+            if ssh_key['fingerprint'] != fingerprint:
+                self.result['changed'] = True
+                if not self.module.check_mode:
+                    # Registering requires removing the old key first.
+                    self.cs.deleteSSHKeyPair(**args)
+                    args['publickey'] = public_key
+                    res = self.cs.registerSSHKeyPair(**args)
+
+        if res and 'keypair' in res:
+            ssh_key = res['keypair']
+
+        return ssh_key
+
+
+    def create_ssh_key(self):
+        """Create a new key pair if none exists; the returned dict contains
+        the generated private key. Honors check mode."""
+        ssh_key = self.get_ssh_key()
+        if not ssh_key:
+            self.result['changed'] = True
+            args = {}
+            args['domainid'] = self.get_domain('id')
+            args['account'] = self.get_account('name')
+            args['projectid'] = self.get_project('id')
+            args['name'] = self.module.params.get('name')
+            if not self.module.check_mode:
+                res = self.cs.createSSHKeyPair(**args)
+                ssh_key = res['keypair']
+        return ssh_key
+
+
+    def remove_ssh_key(self):
+        """Remove the key pair if it exists; returns the key as it was
+        before removal, or None. Honors check mode."""
+        ssh_key = self.get_ssh_key()
+        if ssh_key:
+            self.result['changed'] = True
+            args = {}
+            args['domainid'] = self.get_domain('id')
+            args['account'] = self.get_account('name')
+            args['projectid'] = self.get_project('id')
+            args['name'] = self.module.params.get('name')
+            if not self.module.check_mode:
+                res = self.cs.deleteSSHKeyPair(**args)
+        return ssh_key
+
+
+    def get_ssh_key(self):
+        """Return the existing key pair dict (cached), or None if absent."""
+        if not self.ssh_key:
+            args = {}
+            args['domainid'] = self.get_domain('id')
+            args['account'] = self.get_account('name')
+            args['projectid'] = self.get_project('id')
+            args['name'] = self.module.params.get('name')
+
+            ssh_keys = self.cs.listSSHKeyPairs(**args)
+            if ssh_keys and 'sshkeypair' in ssh_keys:
+                self.ssh_key = ssh_keys['sshkeypair'][0]
+        return self.ssh_key
+
+
+
+    def _get_ssh_fingerprint(self, public_key):
+        # Compute the fingerprint locally via the sshpubkeys library so it
+        # can be compared with the fingerprint stored by CloudStack.
+        key = sshpubkeys.SSHKey(public_key)
+        return key.hash()
+
+
+def main():
+    """Module entry point: build the arg spec and dispatch on state."""
+    # Start from the common CloudStack argument spec and add module args.
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        name = dict(required=True),
+        public_key = dict(default=None),
+        domain = dict(default=None),
+        account = dict(default=None),
+        project = dict(default=None),
+        state = dict(choices=['present', 'absent'], default='present'),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=cs_required_together(),
+        supports_check_mode=True
+    )
+
+    # sshpubkeys is needed to compute fingerprints for registered keys.
+    if not has_lib_sshpubkeys:
+        module.fail_json(msg="python library sshpubkeys required: pip install sshpubkeys")
+
+    try:
+        acs_sshkey = AnsibleCloudStackSshKey(module)
+        state = module.params.get('state')
+        if state in ['absent']:
+            ssh_key = acs_sshkey.remove_ssh_key()
+        else:
+            # With a public key we register it; otherwise CloudStack
+            # generates a new key pair and returns the private key.
+            public_key = module.params.get('public_key')
+            if public_key:
+                ssh_key = acs_sshkey.register_ssh_key(public_key)
+            else:
+                ssh_key = acs_sshkey.create_ssh_key()
+
+        result = acs_sshkey.get_result(ssh_key)
+
+    except CloudStackException as e:
+        module.fail_json(msg='CloudStackException: %s' % str(e))
+
+    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_staticnat.py b/lib/ansible/modules/cloud/cloudstack/cs_staticnat.py
new file mode 100644
index 0000000000..a805a1c8bb
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_staticnat.py
@@ -0,0 +1,282 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_staticnat
+short_description: Manages static NATs on Apache CloudStack based clouds.
+description:
+ - Create, update and remove static NATs.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ ip_address:
+ description:
+ - Public IP address the static NAT is assigned to.
+ required: true
+ vm:
+ description:
+ - Name of virtual machine which we make the static NAT for.
+ - Required if C(state=present).
+ required: false
+ default: null
+ vm_guest_ip:
+ description:
+ - VM guest NIC secondary IP address for the static NAT.
+ required: false
+    default: null
+ network:
+ description:
+ - Network the IP address is related to.
+ required: false
+ default: null
+ version_added: "2.2"
+ vpc:
+ description:
+ - Name of the VPC.
+ required: false
+ default: null
+ version_added: "2.3"
+ state:
+ description:
+ - State of the static NAT.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent' ]
+ domain:
+ description:
+ - Domain the static NAT is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the static NAT is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the static NAT is related to.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone in which the virtual machine is in.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# create a static NAT: 1.2.3.4 -> web01
+- local_action:
+ module: cs_staticnat
+ ip_address: 1.2.3.4
+ vm: web01
+
+# remove a static NAT
+- local_action:
+ module: cs_staticnat
+ ip_address: 1.2.3.4
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the ip_address.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+ip_address:
+ description: Public IP address.
+ returned: success
+ type: string
+ sample: 1.2.3.4
+vm_name:
+ description: Name of the virtual machine.
+ returned: success
+ type: string
+ sample: web-01
+vm_display_name:
+ description: Display name of the virtual machine.
+ returned: success
+ type: string
+ sample: web-01
+vm_guest_ip:
+ description: IP of the virtual machine.
+ returned: success
+ type: string
+ sample: 10.101.65.152
+zone:
+ description: Name of zone the static NAT is related to.
+ returned: success
+ type: string
+ sample: ch-gva-2
+project:
+ description: Name of project the static NAT is related to.
+ returned: success
+ type: string
+ sample: Production
+account:
+ description: Account the static NAT is related to.
+ returned: success
+ type: string
+ sample: example account
+domain:
+ description: Domain the static NAT is related to.
+ returned: success
+ type: string
+ sample: example domain
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackStaticNat(AnsibleCloudStack):
+    """Manage static NAT entries on a CloudStack public IP address."""
+
+    def __init__(self, module):
+        super(AnsibleCloudStackStaticNat, self).__init__(module)
+        # Map CloudStack API result keys to the module's return value keys.
+        self.returns = {
+            'virtualmachinedisplayname': 'vm_display_name',
+            'virtualmachinename': 'vm_name',
+            'ipaddress': 'ip_address',
+            'vmipaddress': 'vm_guest_ip',
+        }
+
+
+    def create_static_nat(self, ip_address):
+        """Enable static NAT from the given public IP address to the VM.
+
+        Returns the re-queried IP address dict reflecting the change
+        (unless running in check mode).
+        """
+        self.result['changed'] = True
+        args = {}
+        args['virtualmachineid'] = self.get_vm(key='id')
+        args['ipaddressid'] = ip_address['id']
+        args['vmguestip'] = self.get_vm_guest_ip()
+        args['networkid'] = self.get_network(key='id')
+        if not self.module.check_mode:
+            res = self.cs.enableStaticNat(**args)
+            if 'errortext' in res:
+                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+            # reset ip address and query new values
+            self.ip_address = None
+            ip_address = self.get_ip_address()
+        return ip_address
+
+
+    def update_static_nat(self, ip_address):
+        """Re-point an existing static NAT if the VM or guest IP changed.
+
+        The API has no update call, so a change requires disabling the
+        existing NAT and enabling a new one.
+        """
+        args = {}
+        args['virtualmachineid'] = self.get_vm(key='id')
+        args['ipaddressid'] = ip_address['id']
+        args['vmguestip'] = self.get_vm_guest_ip()
+
+        # make an alias, so we can use _has_changed()
+        ip_address['vmguestip'] = ip_address['vmipaddress']
+        if self.has_changed(args, ip_address, ['vmguestip', 'virtualmachineid']):
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                # disableStaticNat is async; wait for it before re-enabling.
+                res = self.cs.disableStaticNat(ipaddressid=ip_address['id'])
+                if 'errortext' in res:
+                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+                self.poll_job(res, 'staticnat')
+                res = self.cs.enableStaticNat(**args)
+                if 'errortext' in res:
+                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+                # reset ip address and query new values
+                self.ip_address = None
+                ip_address = self.get_ip_address()
+        return ip_address
+
+
+    def present_static_nat(self):
+        """Ensure static NAT exists and matches the requested VM/guest IP."""
+        ip_address = self.get_ip_address()
+        if not ip_address['isstaticnat']:
+            ip_address = self.create_static_nat(ip_address)
+        else:
+            ip_address = self.update_static_nat(ip_address)
+        return ip_address
+
+
+    def absent_static_nat(self):
+        """Ensure static NAT is disabled on the IP address."""
+        ip_address = self.get_ip_address()
+        if ip_address['isstaticnat']:
+            self.result['changed'] = True
+            if not self.module.check_mode:
+                res = self.cs.disableStaticNat(ipaddressid=ip_address['id'])
+                if 'errortext' in res:
+                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+                poll_async = self.module.params.get('poll_async')
+                if poll_async:
+                    self.poll_job(res, 'staticnat')
+        return ip_address
+
+
+def main():
+    """Module entry point: build the argument spec and dispatch on state."""
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        ip_address = dict(required=True),
+        vm = dict(default=None),
+        vm_guest_ip = dict(default=None),
+        network = dict(default=None),
+        vpc = dict(default=None),
+        state = dict(choices=['present', 'absent'], default='present'),
+        zone = dict(default=None),
+        domain = dict(default=None),
+        account = dict(default=None),
+        project = dict(default=None),
+        poll_async = dict(type='bool', default=True),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=cs_required_together(),
+        supports_check_mode=True
+    )
+
+    try:
+        acs_static_nat = AnsibleCloudStackStaticNat(module)
+
+        state = module.params.get('state')
+        if state in ['absent']:
+            ip_address = acs_static_nat.absent_static_nat()
+        else:
+            ip_address = acs_static_nat.present_static_nat()
+
+        result = acs_static_nat.get_result(ip_address)
+
+    except CloudStackException as e:
+        module.fail_json(msg='CloudStackException: %s' % str(e))
+
+    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_template.py b/lib/ansible/modules/cloud/cloudstack/cs_template.py
new file mode 100644
index 0000000000..7e6d74e9c6
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_template.py
@@ -0,0 +1,672 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_template
+short_description: Manages templates on Apache CloudStack based clouds.
+description:
+ - Register a template from URL, create a template from a ROOT volume of a stopped VM or its snapshot, extract and delete templates.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the template.
+ required: true
+ url:
+ description:
+ - URL of where the template is hosted on C(state=present).
+ - URL to which the template would be extracted on C(state=extracted).
+ - Mutually exclusive with C(vm).
+ required: false
+ default: null
+ vm:
+ description:
+ - VM name the template will be created from its volume or alternatively from a snapshot.
+ - VM must be in stopped state if created from its volume.
+ - Mutually exclusive with C(url).
+ required: false
+ default: null
+ snapshot:
+ description:
+ - Name of the snapshot, created from the VM ROOT volume, the template will be created from.
+ - C(vm) is required together with this argument.
+ required: false
+ default: null
+ os_type:
+ description:
+ - OS type that best represents the OS of this template.
+ required: false
+ default: null
+ checksum:
+ description:
+ - The MD5 checksum value of this template.
+ - If set, we search by checksum instead of name.
+ required: false
+    default: null
+ is_ready:
+ description:
+ - This flag is used for searching existing templates.
+ - If set to C(true), it will only list template ready for deployment e.g. successfully downloaded and installed.
+ - Recommended to set it to C(false).
+ required: false
+ default: false
+ is_public:
+ description:
+ - Register the template to be publicly available to all users.
+ - Only used if C(state) is present.
+ required: false
+ default: false
+ is_featured:
+ description:
+ - Register the template to be featured.
+ - Only used if C(state) is present.
+ required: false
+ default: false
+ is_dynamically_scalable:
+ description:
+ - Register the template having XS/VMWare tools installed in order to support dynamic scaling of VM CPU/memory.
+ - Only used if C(state) is present.
+ required: false
+ default: false
+ cross_zones:
+ description:
+ - Whether the template should be synced or removed across zones.
+ - Only used if C(state) is present or absent.
+ required: false
+ default: false
+ mode:
+ description:
+ - Mode for the template extraction.
+ - Only used if C(state=extracted).
+ required: false
+ default: 'http_download'
+ choices: [ 'http_download', 'ftp_upload' ]
+ domain:
+ description:
+ - Domain the template, snapshot or VM is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the template, snapshot or VM is related to.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the template to be registered in.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone you wish the template to be registered or deleted from.
+ - If not specified, first found zone will be used.
+ required: false
+ default: null
+ template_filter:
+ description:
+ - Name of the filter used to search for the template.
+ required: false
+ default: 'self'
+ choices: [ 'featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community' ]
+ hypervisor:
+ description:
+ - Name the hypervisor to be used for creating the new template.
+ - Relevant when using C(state=present).
+ required: false
+ default: null
+ choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ]
+ requires_hvm:
+ description:
+ - true if this template requires HVM.
+ required: false
+ default: false
+ password_enabled:
+ description:
+ - True if the template supports the password reset feature.
+ required: false
+ default: false
+ template_tag:
+ description:
+ - the tag for this template.
+ required: false
+ default: null
+ sshkey_enabled:
+ description:
+ - True if the template supports the sshkey upload feature.
+ required: false
+ default: false
+ is_routing:
+ description:
+ - True if the template type is routing i.e., if template is used to deploy router.
+ - Only considered if C(url) is used.
+ required: false
+ default: false
+ format:
+ description:
+ - The format for the template.
+ - Relevant when using C(state=present).
+ required: false
+ default: null
+ choices: [ 'QCOW2', 'RAW', 'VHD', 'OVA' ]
+ is_extractable:
+ description:
+ - True if the template or its derivatives are extractable.
+ required: false
+ default: false
+ details:
+ description:
+ - Template details in key/value pairs.
+ required: false
+ default: null
+ bits:
+ description:
+ - 32 or 64 bits support.
+ required: false
+ default: '64'
+ display_text:
+ description:
+ - Display text of the template.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the template.
+ required: false
+ default: 'present'
+    choices: [ 'present', 'absent', 'extracted' ]
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Register a systemvm template
+- local_action:
+ module: cs_template
+ name: systemvm-vmware-4.5
+ url: "http://packages.shapeblue.com/systemvmtemplate/4.5/systemvm64template-4.5-vmware.ova"
+ hypervisor: VMware
+ format: OVA
+ cross_zones: yes
+ os_type: Debian GNU/Linux 7(64-bit)
+
+# Create a template from a stopped virtual machine's volume
+- local_action:
+ module: cs_template
+ name: debian-base-template
+ vm: debian-base-vm
+ os_type: Debian GNU/Linux 7(64-bit)
+ zone: tokio-ix
+ password_enabled: yes
+ is_public: yes
+
+# Create a template from a virtual machine's root volume snapshot
+- local_action:
+ module: cs_template
+ name: debian-base-template
+ vm: debian-base-vm
+ snapshot: ROOT-233_2015061509114
+ os_type: Debian GNU/Linux 7(64-bit)
+ zone: tokio-ix
+ password_enabled: yes
+ is_public: yes
+
+# Remove a template
+- local_action:
+ module: cs_template
+ name: systemvm-4.2
+ cross_zones: yes
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the template.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+name:
+ description: Name of the template.
+ returned: success
+ type: string
+ sample: Debian 7 64-bit
+display_text:
+ description: Display text of the template.
+ returned: success
+ type: string
+ sample: Debian 7.7 64-bit minimal 2015-03-19
+checksum:
+ description: MD5 checksum of the template.
+ returned: success
+ type: string
+ sample: 0b31bccccb048d20b551f70830bb7ad0
+status:
+ description: Status of the template.
+ returned: success
+ type: string
+ sample: Download Complete
+is_ready:
+ description: True if the template is ready to be deployed from.
+ returned: success
+ type: boolean
+ sample: true
+is_public:
+ description: True if the template is public.
+ returned: success
+ type: boolean
+ sample: true
+is_featured:
+ description: True if the template is featured.
+ returned: success
+ type: boolean
+ sample: true
+is_extractable:
+ description: True if the template is extractable.
+ returned: success
+ type: boolean
+ sample: true
+format:
+ description: Format of the template.
+ returned: success
+ type: string
+ sample: OVA
+os_type:
+  description: Type of the OS.
+ returned: success
+ type: string
+ sample: CentOS 6.5 (64-bit)
+password_enabled:
+ description: True if the reset password feature is enabled, false otherwise.
+ returned: success
+ type: boolean
+ sample: false
+sshkey_enabled:
+ description: true if template is sshkey enabled, false otherwise.
+ returned: success
+ type: boolean
+ sample: false
+cross_zones:
+ description: true if the template is managed across all zones, false otherwise.
+ returned: success
+ type: boolean
+ sample: false
+template_type:
+ description: Type of the template.
+ returned: success
+ type: string
+ sample: USER
+created:
+ description: Date of registering.
+ returned: success
+ type: string
+ sample: 2015-03-29T14:57:06+0200
+template_tag:
+ description: Template tag related to this template.
+ returned: success
+ type: string
+ sample: special
+hypervisor:
+ description: Hypervisor related to this template.
+ returned: success
+ type: string
+ sample: VMware
+mode:
+ description: Mode of extraction
+ returned: success
+ type: string
+ sample: http_download
+state:
+ description: State of the extracted template
+ returned: success
+ type: string
+ sample: DOWNLOAD_URL_CREATED
+url:
+ description: Url to which the template is extracted to
+ returned: success
+ type: string
+ sample: "http://1.2.3.4/userdata/eb307f13-4aca-45e8-b157-a414a14e6b04.ova"
+tags:
+ description: List of resource tags associated with the template.
+ returned: success
+ type: dict
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+zone:
+ description: Name of zone the template is registered in.
+ returned: success
+ type: string
+ sample: zuerich
+domain:
+ description: Domain the template is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the template is related to.
+ returned: success
+ type: string
+ sample: example account
+project:
+ description: Name of project the template is related to.
+ returned: success
+ type: string
+ sample: Production
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackTemplate(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackTemplate, self).__init__(module)
+ self.returns = {
+ 'checksum': 'checksum',
+ 'status': 'status',
+ 'isready': 'is_ready',
+ 'templatetag': 'template_tag',
+ 'sshkeyenabled': 'sshkey_enabled',
+ 'passwordenabled': 'password_enabled',
+ 'tempaltetype': 'template_type',
+ 'ostypename': 'os_type',
+ 'crossZones': 'cross_zones',
+ 'isextractable': 'is_extractable',
+ 'isfeatured': 'is_featured',
+ 'ispublic': 'is_public',
+ 'format': 'format',
+ 'hypervisor': 'hypervisor',
+ 'url': 'url',
+ 'extractMode': 'mode',
+ 'state': 'state',
+ }
+
+
+ def _get_args(self):
+ args = {}
+ args['name'] = self.module.params.get('name')
+ args['displaytext'] = self.get_or_fallback('display_text', 'name')
+ args['bits'] = self.module.params.get('bits')
+ args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable')
+ args['isextractable'] = self.module.params.get('is_extractable')
+ args['isfeatured'] = self.module.params.get('is_featured')
+ args['ispublic'] = self.module.params.get('is_public')
+ args['passwordenabled'] = self.module.params.get('password_enabled')
+ args['requireshvm'] = self.module.params.get('requires_hvm')
+ args['templatetag'] = self.module.params.get('template_tag')
+ args['ostypeid'] = self.get_os_type(key='id')
+
+ if not args['ostypeid']:
+ self.module.fail_json(msg="Missing required arguments: os_type")
+
+ return args
+
+
+ def get_root_volume(self, key=None):
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['virtualmachineid'] = self.get_vm(key='id')
+ args['type'] = "ROOT"
+
+ volumes = self.cs.listVolumes(**args)
+ if volumes:
+ return self._get_by_key(key, volumes['volume'][0])
+ self.module.fail_json(msg="Root volume for '%s' not found" % self.get_vm('name'))
+
+
+ def get_snapshot(self, key=None):
+ snapshot = self.module.params.get('snapshot')
+ if not snapshot:
+ return None
+
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['volumeid'] = self.get_root_volume('id')
+ snapshots = self.cs.listSnapshots(**args)
+ if snapshots:
+ for s in snapshots['snapshot']:
+ if snapshot in [ s['name'], s['id'] ]:
+ return self._get_by_key(key, s)
+ self.module.fail_json(msg="Snapshot '%s' not found" % snapshot)
+
+
+ def create_template(self):
+ template = self.get_template()
+ if not template:
+ self.result['changed'] = True
+
+ args = self._get_args()
+ snapshot_id = self.get_snapshot(key='id')
+ if snapshot_id:
+ args['snapshotid'] = snapshot_id
+ else:
+ args['volumeid'] = self.get_root_volume('id')
+
+ if not self.module.check_mode:
+ template = self.cs.createTemplate(**args)
+
+ if 'errortext' in template:
+ self.module.fail_json(msg="Failed: '%s'" % template['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ template = self.poll_job(template, 'template')
+ return template
+
+
+ def register_template(self):
+ required_params = [
+ 'format',
+ 'url',
+ 'hypervisor',
+ ]
+ self.module.fail_on_missing_params(required_params=required_params)
+ template = self.get_template()
+ if not template:
+ self.result['changed'] = True
+ args = self._get_args()
+ args['url'] = self.module.params.get('url')
+ args['format'] = self.module.params.get('format')
+ args['checksum'] = self.module.params.get('checksum')
+ args['isextractable'] = self.module.params.get('is_extractable')
+ args['isrouting'] = self.module.params.get('is_routing')
+ args['sshkeyenabled'] = self.module.params.get('sshkey_enabled')
+ args['hypervisor'] = self.get_hypervisor()
+ args['domainid'] = self.get_domain(key='id')
+ args['account'] = self.get_account(key='name')
+ args['projectid'] = self.get_project(key='id')
+
+ if not self.module.params.get('cross_zones'):
+ args['zoneid'] = self.get_zone(key='id')
+ else:
+ args['zoneid'] = -1
+
+ if not self.module.check_mode:
+ res = self.cs.registerTemplate(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ template = res['template']
+ return template
+
+
+ def get_template(self):
+ args = {}
+ args['isready'] = self.module.params.get('is_ready')
+ args['templatefilter'] = self.module.params.get('template_filter')
+ args['domainid'] = self.get_domain(key='id')
+ args['account'] = self.get_account(key='name')
+ args['projectid'] = self.get_project(key='id')
+
+ if not self.module.params.get('cross_zones'):
+ args['zoneid'] = self.get_zone(key='id')
+
+ # if checksum is set, we only look on that.
+ checksum = self.module.params.get('checksum')
+ if not checksum:
+ args['name'] = self.module.params.get('name')
+
+ templates = self.cs.listTemplates(**args)
+ if templates:
+ # if checksum is set, we only look on that.
+ if not checksum:
+ return templates['template'][0]
+ else:
+ for i in templates['template']:
+ if 'checksum' in i and i['checksum'] == checksum:
+ return i
+ return None
+
+
+ def extract_template(self):
+ template = self.get_template()
+ if not template:
+ self.module.fail_json(msg="Failed: template not found")
+
+ args = {}
+ args['id'] = template['id']
+ args['url'] = self.module.params.get('url')
+ args['mode'] = self.module.params.get('mode')
+ args['zoneid'] = self.get_zone(key='id')
+
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ template = self.cs.extractTemplate(**args)
+
+ if 'errortext' in template:
+ self.module.fail_json(msg="Failed: '%s'" % template['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ template = self.poll_job(template, 'template')
+ return template
+
+
+ def remove_template(self):
+ template = self.get_template()
+ if template:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = template['id']
+
+ if not self.module.params.get('cross_zones'):
+ args['zoneid'] = self.get_zone(key='id')
+
+ if not self.module.check_mode:
+ res = self.cs.deleteTemplate(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ res = self.poll_job(res, 'template')
+ return template
+
+
+
+def main():
+    """Module entry point: build the argument spec and dispatch on state."""
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        name = dict(required=True),
+        display_text = dict(default=None),
+        url = dict(default=None),
+        vm = dict(default=None),
+        snapshot = dict(default=None),
+        os_type = dict(default=None),
+        is_ready = dict(type='bool', default=False),
+        is_public = dict(type='bool', default=True),
+        is_featured = dict(type='bool', default=False),
+        is_dynamically_scalable = dict(type='bool', default=False),
+        is_extractable = dict(type='bool', default=False),
+        is_routing = dict(type='bool', default=False),
+        checksum = dict(default=None),
+        template_filter = dict(default='self', choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']),
+        hypervisor = dict(choices=CS_HYPERVISORS, default=None),
+        requires_hvm = dict(type='bool', default=False),
+        password_enabled = dict(type='bool', default=False),
+        template_tag = dict(default=None),
+        sshkey_enabled = dict(type='bool', default=False),
+        format = dict(choices=['QCOW2', 'RAW', 'VHD', 'OVA'], default=None),
+        details = dict(default=None),
+        bits = dict(type='int', choices=[ 32, 64 ], default=64),
+        state = dict(choices=['present', 'absent', 'extracted'], default='present'),
+        cross_zones = dict(type='bool', default=False),
+        mode = dict(choices=['http_download', 'ftp_upload'], default='http_download'),
+        zone = dict(default=None),
+        domain = dict(default=None),
+        account = dict(default=None),
+        project = dict(default=None),
+        poll_async = dict(type='bool', default=True),
+    ))
+
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=cs_required_together(),
+        # url/vm are alternative template sources; cross_zones ignores zone.
+        mutually_exclusive = (
+            ['url', 'vm'],
+            ['zone', 'cross_zones'],
+        ),
+        supports_check_mode=True
+    )
+
+    try:
+        acs_tpl = AnsibleCloudStackTemplate(module)
+
+        state = module.params.get('state')
+        if state in ['absent']:
+            tpl = acs_tpl.remove_template()
+
+        elif state in ['extracted']:
+            tpl = acs_tpl.extract_template()
+
+        else:
+            # state=present: register from url, or create from a VM volume/snapshot.
+            if module.params.get('url'):
+                tpl = acs_tpl.register_template()
+            elif module.params.get('vm'):
+                tpl = acs_tpl.create_template()
+            else:
+                module.fail_json(msg="one of the following is required on state=present: url,vm")
+
+        result = acs_tpl.get_result(tpl)
+
+    except CloudStackException as e:
+        module.fail_json(msg='CloudStackException: %s' % str(e))
+
+    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_user.py b/lib/ansible/modules/cloud/cloudstack/cs_user.py
new file mode 100644
index 0000000000..f9f43322e4
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_user.py
@@ -0,0 +1,455 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_user
+short_description: Manages users on Apache CloudStack based clouds.
+description:
+ - Create, update, disable, lock, enable and remove users.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ username:
+ description:
+ - Username of the user.
+ required: true
+ account:
+ description:
+ - Account the user will be created under.
+ - Required on C(state=present).
+ required: false
+ default: null
+ password:
+ description:
+ - Password of the user to be created.
+ - Required on C(state=present).
+ - Only considered on creation and will not be updated if user exists.
+ required: false
+ default: null
+ first_name:
+ description:
+ - First name of the user.
+ - Required on C(state=present).
+ required: false
+ default: null
+ last_name:
+ description:
+ - Last name of the user.
+ - Required on C(state=present).
+ required: false
+ default: null
+ email:
+ description:
+ - Email of the user.
+ - Required on C(state=present).
+ required: false
+ default: null
+ timezone:
+ description:
+ - Timezone of the user.
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain the user is related to.
+ required: false
+ default: 'ROOT'
+ state:
+ description:
+ - State of the user.
+ - C(unlocked) is an alias for C(enabled).
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked' ]
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# create an user in domain 'CUSTOMERS'
+local_action:
+ module: cs_user
+ account: developers
+ username: johndoe
+ password: S3Cur3
+ last_name: Doe
+ first_name: John
+ email: john.doe@example.com
+ domain: CUSTOMERS
+
+# Lock an existing user in domain 'CUSTOMERS'
+local_action:
+ module: cs_user
+ username: johndoe
+ domain: CUSTOMERS
+ state: locked
+
+# Disable an existing user in domain 'CUSTOMERS'
+local_action:
+ module: cs_user
+ username: johndoe
+ domain: CUSTOMERS
+ state: disabled
+
+# Enable/unlock an existing user in domain 'CUSTOMERS'
+local_action:
+ module: cs_user
+ username: johndoe
+ domain: CUSTOMERS
+ state: enabled
+
+# Remove an user in domain 'CUSTOMERS'
+local_action:
+ module: cs_user
+ name: customer_xy
+ domain: CUSTOMERS
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the user.
+ returned: success
+ type: string
+ sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
+username:
+ description: Username of the user.
+ returned: success
+ type: string
+ sample: johndoe
+first_name:
+ description: First name of the user.
+ returned: success
+ type: string
+ sample: John
+last_name:
+ description: Last name of the user.
+ returned: success
+ type: string
+ sample: Doe
+email:
+  description: Email of the user.
+ returned: success
+ type: string
+ sample: john.doe@example.com
+api_key:
+ description: API key of the user.
+ returned: success
+ type: string
+ sample: JLhcg8VWi8DoFqL2sSLZMXmGojcLnFrOBTipvBHJjySODcV4mCOo29W2duzPv5cALaZnXj5QxDx3xQfaQt3DKg
+api_secret:
+ description: API secret of the user.
+ returned: success
+ type: string
+ sample: FUELo3LB9fa1UopjTLPdqLv_6OXQMJZv9g9N4B_Ao3HFz8d6IGFCV9MbPFNM8mwz00wbMevja1DoUNDvI8C9-g
+account:
+ description: Account name of the user.
+ returned: success
+ type: string
+ sample: developers
+account_type:
+ description: Type of the account.
+ returned: success
+ type: string
+ sample: user
+timezone:
+ description: Timezone of the user.
+ returned: success
+ type: string
+  sample: America/New_York
+created:
+ description: Date the user was created.
+ returned: success
+ type: string
+  sample: 2015-05-03T15:55:08+0200
+state:
+ description: State of the user.
+ returned: success
+ type: string
+ sample: enabled
+domain:
+ description: Domain the user is related.
+ returned: success
+ type: string
+ sample: ROOT
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackUser(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackUser, self).__init__(module)
+ self.returns = {
+ 'username': 'username',
+ 'firstname': 'first_name',
+ 'lastname': 'last_name',
+ 'email': 'email',
+ 'secretkey': 'api_secret',
+ 'apikey': 'api_key',
+ 'timezone': 'timezone',
+ }
+ self.account_types = {
+ 'user': 0,
+ 'root_admin': 1,
+ 'domain_admin': 2,
+ }
+ self.user = None
+
+
+ def get_account_type(self):
+ account_type = self.module.params.get('account_type')
+ return self.account_types[account_type]
+
+
+ def get_user(self):
+ if not self.user:
+ args = {}
+ args['domainid'] = self.get_domain('id')
+ users = self.cs.listUsers(**args)
+ if users:
+ user_name = self.module.params.get('username')
+ for u in users['user']:
+ if user_name.lower() == u['username'].lower():
+ self.user = u
+ break
+ return self.user
+
+
+ def enable_user(self):
+ user = self.get_user()
+ if not user:
+ user = self.present_user()
+
+ if user['state'].lower() != 'enabled':
+ self.result['changed'] = True
+ args = {}
+ args['id'] = user['id']
+ if not self.module.check_mode:
+ res = self.cs.enableUser(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ user = res['user']
+ return user
+
+
+ def lock_user(self):
+ user = self.get_user()
+ if not user:
+ user = self.present_user()
+
+ # we need to enable the user to lock it.
+ if user['state'].lower() == 'disabled':
+ user = self.enable_user()
+
+ if user['state'].lower() != 'locked':
+ self.result['changed'] = True
+ args = {}
+ args['id'] = user['id']
+ if not self.module.check_mode:
+ res = self.cs.lockUser(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ user = res['user']
+ return user
+
+
+ def disable_user(self):
+ user = self.get_user()
+ if not user:
+ user = self.present_user()
+
+ if user['state'].lower() != 'disabled':
+ self.result['changed'] = True
+ args = {}
+ args['id'] = user['id']
+ if not self.module.check_mode:
+ user = self.cs.disableUser(**args)
+ if 'errortext' in user:
+ self.module.fail_json(msg="Failed: '%s'" % user['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ user = self.poll_job(user, 'user')
+ return user
+
+
+ def present_user(self):
+ missing_params = []
+ for required_params in [
+ 'account',
+ 'email',
+ 'password',
+ 'first_name',
+ 'last_name',
+ ]:
+ if not self.module.params.get(required_params):
+ missing_params.append(required_params)
+ if missing_params:
+ self.module.fail_json(msg="missing required arguments: %s" % ','.join(missing_params))
+
+ user = self.get_user()
+ if user:
+ user = self._update_user(user)
+ else:
+ user = self._create_user(user)
+ return user
+
+
+ def _create_user(self, user):
+ self.result['changed'] = True
+
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain('id')
+ args['username'] = self.module.params.get('username')
+ args['password'] = self.module.params.get('password')
+ args['firstname'] = self.module.params.get('first_name')
+ args['lastname'] = self.module.params.get('last_name')
+ args['email'] = self.module.params.get('email')
+ args['timezone'] = self.module.params.get('timezone')
+ if not self.module.check_mode:
+ res = self.cs.createUser(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ user = res['user']
+ # register user api keys
+ res = self.cs.registerUserKeys(id=user['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ user.update(res['userkeys'])
+ return user
+
+
+ def _update_user(self, user):
+ args = {}
+ args['id'] = user['id']
+ args['firstname'] = self.module.params.get('first_name')
+ args['lastname'] = self.module.params.get('last_name')
+ args['email'] = self.module.params.get('email')
+ args['timezone'] = self.module.params.get('timezone')
+ if self.has_changed(args, user):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.updateUser(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ user = res['user']
+ # register user api keys
+ if 'apikey' not in user:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.registerUserKeys(id=user['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ user.update(res['userkeys'])
+ return user
+
+
+ def absent_user(self):
+ user = self.get_user()
+ if user:
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ res = self.cs.deleteUser(id=user['id'])
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ return user
+
+
+ def get_result(self, user):
+ super(AnsibleCloudStackUser, self).get_result(user)
+ if user:
+ if 'accounttype' in user:
+ for key,value in self.account_types.items():
+ if value == user['accounttype']:
+ self.result['account_type'] = key
+ break
+ return self.result
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ username = dict(required=True),
+ account = dict(default=None),
+ state = dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked'], default='present'),
+ domain = dict(default='ROOT'),
+ email = dict(default=None),
+ first_name = dict(default=None),
+ last_name = dict(default=None),
+ password = dict(default=None, no_log=True),
+ timezone = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_acc = AnsibleCloudStackUser(module)
+
+ state = module.params.get('state')
+
+ if state in ['absent']:
+ user = acs_acc.absent_user()
+
+ elif state in ['enabled', 'unlocked']:
+ user = acs_acc.enable_user()
+
+ elif state in ['disabled']:
+ user = acs_acc.disable_user()
+
+ elif state in ['locked']:
+ user = acs_acc.lock_user()
+
+ else:
+ user = acs_acc.present_user()
+
+ result = acs_acc.get_result(user)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_vmsnapshot.py b/lib/ansible/modules/cloud/cloudstack/cs_vmsnapshot.py
new file mode 100644
index 0000000000..e3b43820a5
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_vmsnapshot.py
@@ -0,0 +1,304 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_vmsnapshot
+short_description: Manages VM snapshots on Apache CloudStack based clouds.
+description:
+ - Create, remove and revert VM from snapshots.
+version_added: '2.0'
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Unique Name of the snapshot. In CloudStack terms display name.
+ required: true
+ aliases: ['display_name']
+ vm:
+ description:
+ - Name of the virtual machine.
+ required: true
+ description:
+ description:
+ - Description of the snapshot.
+ required: false
+ default: null
+ snapshot_memory:
+ description:
+ - Snapshot memory if set to true.
+ required: false
+ default: false
+ zone:
+ description:
+ - Name of the zone in which the VM is in. If not set, default zone is used.
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the VM is assigned to.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the snapshot.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent', 'revert' ]
+ domain:
+ description:
+ - Domain the VM snapshot is related to.
+ required: false
+ default: null
+ account:
+ description:
+ - Account the VM snapshot is related to.
+ required: false
+ default: null
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create a VM snapshot of disk and memory before an upgrade
+- local_action:
+ module: cs_vmsnapshot
+ name: Snapshot before upgrade
+ vm: web-01
+ snapshot_memory: yes
+
+# Revert a VM to a snapshot after a failed upgrade
+- local_action:
+ module: cs_vmsnapshot
+ name: Snapshot before upgrade
+ vm: web-01
+ state: revert
+
+# Remove a VM snapshot after successful upgrade
+- local_action:
+ module: cs_vmsnapshot
+ name: Snapshot before upgrade
+ vm: web-01
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the snapshot.
+ returned: success
+ type: string
+ sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
+name:
+ description: Name of the snapshot.
+ returned: success
+ type: string
+ sample: snapshot before update
+display_name:
+ description: Display name of the snapshot.
+ returned: success
+ type: string
+ sample: snapshot before update
+created:
+ description: date of the snapshot.
+ returned: success
+ type: string
+ sample: 2015-03-29T14:57:06+0200
+current:
+ description: true if snapshot is current
+ returned: success
+ type: boolean
+ sample: True
+state:
+ description: state of the vm snapshot
+ returned: success
+ type: string
+ sample: Allocated
+type:
+ description: type of vm snapshot
+ returned: success
+ type: string
+ sample: DiskAndMemory
+description:
+ description: description of vm snapshot
+ returned: success
+ type: string
+ sample: snapshot brought to you by Ansible
+domain:
+  description: Domain the vm snapshot is related to.
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: Account the vm snapshot is related to.
+ returned: success
+ type: string
+ sample: example account
+project:
+ description: Name of project the vm snapshot is related to.
+ returned: success
+ type: string
+ sample: Production
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackVmSnapshot(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackVmSnapshot, self).__init__(module)
+ self.returns = {
+ 'type': 'type',
+ 'current': 'current',
+ }
+
+
+ def get_snapshot(self):
+ args = {}
+ args['virtualmachineid'] = self.get_vm('id')
+ args['account'] = self.get_account('name')
+ args['domainid'] = self.get_domain('id')
+ args['projectid'] = self.get_project('id')
+ args['name'] = self.module.params.get('name')
+
+ snapshots = self.cs.listVMSnapshot(**args)
+ if snapshots:
+ return snapshots['vmSnapshot'][0]
+ return None
+
+
+ def create_snapshot(self):
+ snapshot = self.get_snapshot()
+ if not snapshot:
+ self.result['changed'] = True
+
+ args = {}
+ args['virtualmachineid'] = self.get_vm('id')
+ args['name'] = self.module.params.get('name')
+ args['description'] = self.module.params.get('description')
+ args['snapshotmemory'] = self.module.params.get('snapshot_memory')
+
+ if not self.module.check_mode:
+ res = self.cs.createVMSnapshot(**args)
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if res and poll_async:
+ snapshot = self.poll_job(res, 'vmsnapshot')
+
+ return snapshot
+
+
+ def remove_snapshot(self):
+ snapshot = self.get_snapshot()
+ if snapshot:
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.deleteVMSnapshot(vmsnapshotid=snapshot['id'])
+
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if res and poll_async:
+ res = self.poll_job(res, 'vmsnapshot')
+ return snapshot
+
+
+ def revert_vm_to_snapshot(self):
+ snapshot = self.get_snapshot()
+ if snapshot:
+ self.result['changed'] = True
+
+ if snapshot['state'] != "Ready":
+ self.module.fail_json(msg="snapshot state is '%s', not ready, could not revert VM" % snapshot['state'])
+
+ if not self.module.check_mode:
+ res = self.cs.revertToVMSnapshot(vmsnapshotid=snapshot['id'])
+
+ poll_async = self.module.params.get('poll_async')
+ if res and poll_async:
+ res = self.poll_job(res, 'vmsnapshot')
+ return snapshot
+
+ self.module.fail_json(msg="snapshot not found, could not revert VM")
+
+
+
+def main():
+    argument_spec = cs_argument_spec()
+    argument_spec.update(dict(
+        name = dict(required=True, aliases=['display_name']),
+        vm = dict(required=True),
+        description = dict(default=None),
+        zone = dict(default=None),
+        snapshot_memory = dict(type='bool', default=False),
+        state = dict(choices=['present', 'absent', 'revert'], default='present'),
+        domain = dict(default=None),
+        account = dict(default=None),
+        project = dict(default=None),
+        poll_async = dict(type='bool', default=True),
+    ))
+
+    # NOTE: an earlier draft extended required_together with
+    # ['icmp_type', 'icmp_code'] -- a copy/paste leftover from cs_firewall.
+    # Those parameters do not exist in this module, so only the common
+    # CloudStack constraints apply here.
+    module = AnsibleModule(
+        argument_spec=argument_spec,
+        required_together=cs_required_together(),
+        supports_check_mode=True
+    )
+
+    try:
+        acs_vmsnapshot = AnsibleCloudStackVmSnapshot(module)
+
+        state = module.params.get('state')
+        if state in ['revert']:
+            snapshot = acs_vmsnapshot.revert_vm_to_snapshot()
+        elif state in ['absent']:
+            snapshot = acs_vmsnapshot.remove_snapshot()
+        else:
+            snapshot = acs_vmsnapshot.create_snapshot()
+
+        result = acs_vmsnapshot.get_result(snapshot)
+
+    except CloudStackException as e:
+        module.fail_json(msg='CloudStackException: %s' % str(e))
+
+    module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_volume.py b/lib/ansible/modules/cloud/cloudstack/cs_volume.py
new file mode 100644
index 0000000000..36071e0d78
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_volume.py
@@ -0,0 +1,496 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2015, Jefferson Girão <jefferson@girao.net>
+# (c) 2015, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_volume
+short_description: Manages volumes on Apache CloudStack based clouds.
+description:
+ - Create, destroy, attach, detach volumes.
+version_added: "2.1"
+author:
+ - "Jefferson Girão (@jeffersongirao)"
+ - "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the volume.
+ - C(name) can only contain ASCII letters.
+ required: true
+ account:
+ description:
+ - Account the volume is related to.
+ required: false
+ default: null
+ custom_id:
+ description:
+ - Custom id to the resource.
+ - Allowed to Root Admins only.
+ required: false
+ default: null
+ disk_offering:
+ description:
+ - Name of the disk offering to be used.
+ - Required one of C(disk_offering), C(snapshot) if volume is not already C(state=present).
+ required: false
+ default: null
+ display_volume:
+ description:
+ - Whether to display the volume to the end user or not.
+ - Allowed to Root Admins only.
+ required: false
+ default: true
+ domain:
+ description:
+ - Name of the domain the volume to be deployed in.
+ required: false
+ default: null
+ max_iops:
+ description:
+ - Max iops
+ required: false
+ default: null
+ min_iops:
+ description:
+ - Min iops
+ required: false
+ default: null
+ project:
+ description:
+ - Name of the project the volume to be deployed in.
+ required: false
+ default: null
+ size:
+ description:
+ - Size of disk in GB
+ required: false
+ default: null
+ snapshot:
+ description:
+ - The snapshot name for the disk volume.
+ - Required one of C(disk_offering), C(snapshot) if volume is not already C(state=present).
+ required: false
+ default: null
+ force:
+ description:
+ - Force removal of volume even it is attached to a VM.
+      - Considered on C(state=absent) only.
+ required: false
+ default: false
+ shrink_ok:
+ description:
+ - Whether to allow to shrink the volume.
+ required: false
+ default: false
+ vm:
+ description:
+ - Name of the virtual machine to attach the volume to.
+ required: false
+ default: null
+ zone:
+ description:
+ - Name of the zone in which the volume should be deployed.
+ - If not set, default zone is used.
+ required: false
+ default: null
+ state:
+ description:
+ - State of the volume.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'absent', 'attached', 'detached' ]
+ poll_async:
+ description:
+ - Poll async jobs until job has finished.
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Create volume within project, zone with specified storage options
+- local_action:
+ module: cs_volume
+ name: web-vm-1-volume
+ project: Integration
+ zone: ch-zrh-ix-01
+ disk_offering: PerfPlus Storage
+ size: 20
+
+# Create/attach volume to instance
+- local_action:
+ module: cs_volume
+ name: web-vm-1-volume
+ disk_offering: PerfPlus Storage
+ size: 20
+ vm: web-vm-1
+ state: attached
+
+# Detach volume
+- local_action:
+ module: cs_volume
+ name: web-vm-1-volume
+ state: detached
+
+# Remove volume
+- local_action:
+ module: cs_volume
+ name: web-vm-1-volume
+ state: absent
+'''
+
+RETURN = '''
+id:
+ description: ID of the volume.
+ returned: success
+ type: string
+ sample:
+name:
+ description: Name of the volume.
+ returned: success
+ type: string
+ sample: web-volume-01
+display_name:
+ description: Display name of the volume.
+ returned: success
+ type: string
+ sample: web-volume-01
+group:
+ description: Group the volume belongs to
+ returned: success
+ type: string
+ sample: web
+domain:
+ description: Domain the volume belongs to
+ returned: success
+ type: string
+ sample: example domain
+project:
+ description: Project the volume belongs to
+ returned: success
+ type: string
+ sample: Production
+zone:
+ description: Name of zone the volume is in.
+ returned: success
+ type: string
+ sample: ch-gva-2
+created:
+ description: Date of the volume was created.
+ returned: success
+ type: string
+ sample: 2014-12-01T14:57:57+0100
+attached:
+ description: Date of the volume was attached.
+ returned: success
+ type: string
+ sample: 2014-12-01T14:57:57+0100
+type:
+ description: Disk volume type.
+ returned: success
+ type: string
+ sample: DATADISK
+size:
+ description: Size of disk volume.
+ returned: success
+ type: string
+ sample: 20
+vm:
+ description: Name of the vm the volume is attached to (not returned when detached)
+ returned: success
+ type: string
+ sample: web-01
+state:
+ description: State of the volume
+ returned: success
+ type: string
+ sample: Attached
+device_id:
+ description: Id of the device on user vm the volume is attached to (not returned when detached)
+ returned: success
+ type: string
+ sample: 1
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackVolume(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackVolume, self).__init__(module)
+ self.returns = {
+ 'group': 'group',
+ 'attached': 'attached',
+ 'vmname': 'vm',
+ 'deviceid': 'device_id',
+ 'type': 'type',
+ 'size': 'size',
+ }
+ self.volume = None
+
+ #TODO implement in cloudstack utils
+ def get_disk_offering(self, key=None):
+ disk_offering = self.module.params.get('disk_offering')
+ if not disk_offering:
+ return None
+
+ # Do not add domain filter for disk offering listing.
+ disk_offerings = self.cs.listDiskOfferings()
+ if disk_offerings:
+ for d in disk_offerings['diskoffering']:
+ if disk_offering in [d['displaytext'], d['name'], d['id']]:
+ return self._get_by_key(key, d)
+ self.module.fail_json(msg="Disk offering '%s' not found" % disk_offering)
+
+
+ def get_volume(self):
+ if not self.volume:
+ args = {}
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['projectid'] = self.get_project(key='id')
+ args['zoneid'] = self.get_zone(key='id')
+ args['displayvolume'] = self.module.params.get('display_volume')
+ args['type'] = 'DATADISK'
+
+ volumes = self.cs.listVolumes(**args)
+ if volumes:
+ volume_name = self.module.params.get('name')
+ for v in volumes['volume']:
+ if volume_name.lower() == v['name'].lower():
+ self.volume = v
+ break
+ return self.volume
+
+
+ def get_snapshot(self, key=None):
+ snapshot = self.module.params.get('snapshot')
+ if not snapshot:
+ return None
+
+ args = {}
+ args['name'] = snapshot
+ args['account'] = self.get_account('name')
+ args['domainid'] = self.get_domain('id')
+ args['projectid'] = self.get_project('id')
+
+ snapshots = self.cs.listSnapshots(**args)
+ if snapshots:
+ return self._get_by_key(key, snapshots['snapshot'][0])
+ self.module.fail_json(msg="Snapshot with name %s not found" % snapshot)
+
+
+ def present_volume(self):
+ volume = self.get_volume()
+ if volume:
+ volume = self.update_volume(volume)
+ else:
+ disk_offering_id = self.get_disk_offering(key='id')
+ snapshot_id = self.get_snapshot(key='id')
+
+ if not disk_offering_id and not snapshot_id:
+ self.module.fail_json(msg="Required one of: disk_offering,snapshot")
+
+ self.result['changed'] = True
+
+ args = {}
+ args['name'] = self.module.params.get('name')
+ args['account'] = self.get_account(key='name')
+ args['domainid'] = self.get_domain(key='id')
+ args['diskofferingid'] = disk_offering_id
+ args['displayvolume'] = self.module.params.get('display_volume')
+ args['maxiops'] = self.module.params.get('max_iops')
+ args['miniops'] = self.module.params.get('min_iops')
+ args['projectid'] = self.get_project(key='id')
+ args['size'] = self.module.params.get('size')
+ args['snapshotid'] = snapshot_id
+ args['zoneid'] = self.get_zone(key='id')
+
+ if not self.module.check_mode:
+ res = self.cs.createVolume(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ volume = self.poll_job(res, 'volume')
+ return volume
+
+
+ def attached_volume(self):
+ volume = self.present_volume()
+
+ if volume:
+ if volume.get('virtualmachineid') != self.get_vm(key='id'):
+ self.result['changed'] = True
+
+ if not self.module.check_mode:
+ volume = self.detached_volume()
+
+ if 'attached' not in volume:
+ self.result['changed'] = True
+
+ args = {}
+ args['id'] = volume['id']
+ args['virtualmachineid'] = self.get_vm(key='id')
+ args['deviceid'] = self.module.params.get('device_id')
+
+ if not self.module.check_mode:
+ res = self.cs.attachVolume(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ volume = self.poll_job(res, 'volume')
+ return volume
+
+
+    def detached_volume(self):
+        """Ensure the volume exists and is detached from any VM."""
+        volume = self.present_volume()
+
+        if volume:
+            # A volume without an 'attached' timestamp is already detached.
+            if 'attached' not in volume:
+                return volume
+
+            self.result['changed'] = True
+
+            if not self.module.check_mode:
+                res = self.cs.detachVolume(id=volume['id'])
+                # Bug fix: check the API response (res) for errors, not the
+                # volume dict -- otherwise detach failures were silently ignored.
+                if 'errortext' in res:
+                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+                poll_async = self.module.params.get('poll_async')
+                if poll_async:
+                    volume = self.poll_job(res, 'volume')
+        return volume
+
+
+ def absent_volume(self):
+ volume = self.get_volume()
+
+ if volume:
+ if 'attached' in volume and not self.module.params.get('force'):
+ self.module.fail_json(msg="Volume '%s' is attached, use force=true for detaching and removing the volume." % volume.get('name'))
+
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ volume = self.detached_volume()
+
+ res = self.cs.deleteVolume(id=volume['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ res = self.poll_job(res, 'volume')
+
+ return volume
+
+
+ def update_volume(self, volume):
+ args_resize = {}
+ args_resize['id'] = volume['id']
+ args_resize['diskofferingid'] = self.get_disk_offering(key='id')
+ args_resize['maxiops'] = self.module.params.get('max_iops')
+ args_resize['miniops'] = self.module.params.get('min_iops')
+ args_resize['size'] = self.module.params.get('size')
+
+ # change unit from bytes to giga bytes to compare with args
+ volume_copy = volume.copy()
+ volume_copy['size'] = volume_copy['size'] / (2**30)
+
+ if self.has_changed(args_resize, volume_copy):
+
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ args_resize['shrinkok'] = self.module.params.get('shrink_ok')
+ res = self.cs.resizeVolume(**args_resize)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ volume = self.poll_job(res, 'volume')
+ self.volume = volume
+
+ return volume
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ disk_offering = dict(default=None),
+ display_volume = dict(type='bool', default=None),
+ max_iops = dict(type='int', default=None),
+ min_iops = dict(type='int', default=None),
+ size = dict(type='int', default=None),
+ snapshot = dict(default=None),
+ vm = dict(default=None),
+ device_id = dict(type='int', default=None),
+ custom_id = dict(default=None),
+ force = dict(type='bool', default=False),
+ shrink_ok = dict(type='bool', default=False),
+ state = dict(choices=['present', 'absent', 'attached', 'detached'], default='present'),
+ zone = dict(default=None),
+ domain = dict(default=None),
+ account = dict(default=None),
+ project = dict(default=None),
+ poll_async = dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ mutually_exclusive = (
+ ['snapshot', 'disk_offering'],
+ ),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_vol = AnsibleCloudStackVolume(module)
+
+ state = module.params.get('state')
+
+ if state in ['absent']:
+ volume = acs_vol.absent_volume()
+ elif state in ['attached']:
+ volume = acs_vol.attached_volume()
+ elif state in ['detached']:
+ volume = acs_vol.detached_volume()
+ else:
+ volume = acs_vol.present_volume()
+
+ result = acs_vol.get_result(volume)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_vpc.py b/lib/ansible/modules/cloud/cloudstack/cs_vpc.py
new file mode 100644
index 0000000000..1495b86550
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_vpc.py
@@ -0,0 +1,391 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_vpc
+short_description: "Manages VPCs on Apache CloudStack based clouds."
+description:
+ - "Create, update and delete VPCs."
+version_added: "2.3"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - "Name of the VPC."
+ required: true
+ display_text:
+ description:
+ - "Display text of the VPC."
+ - "If not set, C(name) will be used for creating."
+ required: false
+ default: null
+ cidr:
+ description:
+ - "CIDR of the VPC, e.g. 10.1.0.0/16"
+ - "All VPC guest networks' CIDRs must be within this CIDR."
+ - "Required on C(state=present)."
+ required: false
+ default: null
+ network_domain:
+ description:
+ - "Network domain for the VPC."
+ - "All networks inside the VPC will belong to this domain."
+ required: false
+ default: null
+ vpc_offering:
+ description:
+ - "Name of the VPC offering."
+ - "If not set, default VPC offering is used."
+ required: false
+ default: null
+ state:
+ description:
+ - "State of the VPC."
+ required: false
+ default: present
+ choices:
+ - present
+ - absent
+ - restarted
+ domain:
+ description:
+ - "Domain the VPC is related to."
+ required: false
+ default: null
+ account:
+ description:
+ - "Account the VPC is related to."
+ required: false
+ default: null
+ project:
+ description:
+ - "Name of the project the VPC is related to."
+ required: false
+ default: null
+ zone:
+ description:
+ - "Name of the zone."
+ - "If not set, default zone is used."
+ required: false
+ default: null
+ tags:
+ description:
+ - "List of tags. Tags are a list of dictionaries having keys C(key) and C(value)."
+ - "For deleting all tags, set an empty list e.g. C(tags: [])."
+ required: false
+ default: null
+ aliases:
+ - tag
+ poll_async:
+ description:
+ - "Poll async jobs until job has finished."
+ required: false
+ default: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Ensure a VPC is present
+- local_action:
+ module: cs_vpc
+ name: my_vpc
+ display_text: My example VPC
+ cidr: 10.10.0.0/16
+
+# Ensure a VPC is absent
+- local_action:
+ module: cs_vpc
+ name: my_vpc
+ state: absent
+
+# Ensure a VPC is restarted
+- local_action:
+ module: cs_vpc
+ name: my_vpc
+ state: restarted
+'''
+
+RETURN = '''
+---
+id:
+ description: "UUID of the VPC."
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: "Name of the VPC."
+ returned: success
+ type: string
+ sample: my_vpc
+display_text:
+ description: "Display text of the VPC."
+ returned: success
+ type: string
+ sample: My example VPC
+cidr:
+ description: "CIDR of the VPC."
+ returned: success
+ type: string
+ sample: 10.10.0.0/16
+network_domain:
+ description: "Network domain of the VPC."
+ returned: success
+ type: string
+ sample: example.com
+region_level_vpc:
+ description: "Whether the VPC is region level or not."
+ returned: success
+ type: boolean
+ sample: true
+restart_required:
+ description: "Wheter the VPC router needs a restart or not."
+ returned: success
+ type: boolean
+ sample: true
+distributed_vpc_router:
+ description: "Whether the VPC uses distributed router or not."
+ returned: success
+ type: boolean
+ sample: true
+redundant_vpc_router:
+ description: "Whether the VPC has redundant routers or not."
+ returned: success
+ type: boolean
+ sample: true
+domain:
+ description: "Domain the VPC is related to."
+ returned: success
+ type: string
+ sample: example domain
+account:
+ description: "Account the VPC is related to."
+ returned: success
+ type: string
+ sample: example account
+project:
+ description: "Name of project the VPC is related to."
+ returned: success
+ type: string
+ sample: Production
+zone:
+ description: "Name of zone the VPC is in."
+ returned: success
+ type: string
+ sample: ch-gva-2
+state:
+ description: "State of the VPC."
+ returned: success
+ type: string
+ sample: Enabled
+tags:
+ description: "List of resource tags associated with the VPC."
+ returned: success
+ type: dict
+ sample: '[ { "key": "foo", "value": "bar" } ]'
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.cloudstack import *
+
+
+class AnsibleCloudStackVpc(AnsibleCloudStack):
+
+ def __init__(self, module):
+ super(AnsibleCloudStackVpc, self).__init__(module)
+ self.returns = {
+ 'cidr': 'cidr',
+ 'networkdomain': 'network_domain',
+ 'redundantvpcrouter': 'redundant_vpc_router',
+ 'distributedvpcrouter': 'distributed_vpc_router',
+ 'regionlevelvpc': 'region_level_vpc',
+ 'restartrequired': 'restart_required',
+ }
+ self.vpc = None
+ self.vpc_offering = None
+
+ def get_vpc_offering(self, key=None):
+ if self.vpc_offering:
+ return self._get_by_key(key, self.vpc_offering)
+
+ vpc_offering = self.module.params.get('vpc_offering')
+ args = {}
+ if vpc_offering:
+ args['name'] = vpc_offering
+ else:
+ args['isdefault'] = True
+
+ vpc_offerings = self.cs.listVPCOfferings(**args)
+ if vpc_offerings:
+ self.vpc_offering = vpc_offerings['vpcoffering'][0]
+ return self._get_by_key(key, self.vpc_offering)
+ self.module.fail_json(msg="VPC offering '%s' not found" % vpc_offering)
+
+ def get_vpc(self):
+ if self.vpc:
+ return self.vpc
+ args = {
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ 'zoneid': self.get_zone(key='id'),
+ }
+ vpcs = self.cs.listVPCs()
+ if vpcs:
+ vpc_name = self.module.params.get('name')
+ for v in vpcs['vpc']:
+ if vpc_name.lower() in [ v['name'].lower(), v['id']]:
+ self.vpc = v
+ break
+ return self.vpc
+
+ def restart_vpc(self):
+ self.result['changed'] = True
+ vpc = self.get_vpc()
+ if vpc and not self.module.check_mode:
+ args = {
+ 'id': vpc['id'],
+ }
+ res = self.cs.restartVPC(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ self.poll_job(res, 'vpc')
+ return vpc
+
+ def present_vpc(self):
+ vpc = self.get_vpc()
+ if not vpc:
+ vpc = self._create_vpc(vpc)
+ else:
+ vpc = self._update_vpc(vpc)
+
+ if vpc:
+ vpc = self.ensure_tags(resource=vpc, resource_type='Vpc')
+ return vpc
+
+ def _create_vpc(self, vpc):
+ self.result['changed'] = True
+ args = {
+ 'name': self.module.params.get('name'),
+ 'displaytext': self.get_or_fallback('display_text', 'name'),
+ 'vpcofferingid': self.get_vpc_offering(key='id'),
+ 'cidr': self.module.params.get('cidr'),
+ 'account': self.get_account(key='name'),
+ 'domainid': self.get_domain(key='id'),
+ 'projectid': self.get_project(key='id'),
+ 'zoneid': self.get_zone(key='id'),
+ }
+ self.result['diff']['after'] = args
+ if not self.module.check_mode:
+ res = self.cs.createVPC(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ vpc = self.poll_job(res, 'vpc')
+ return vpc
+
+ def _update_vpc(self, vpc):
+ args = {
+ 'id': vpc['id'],
+ 'displaytext': self.module.params.get('display_text'),
+ }
+ if self.has_changed(args, vpc):
+ self.result['changed'] = True
+ if not self.module.check_mode:
+ res = self.cs.updateVPC(**args)
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ vpc = self.poll_job(res, 'vpc')
+ return vpc
+
+ def absent_vpc(self):
+ vpc = self.get_vpc()
+ if vpc:
+ self.result['changed'] = True
+ self.result['diff']['before'] = vpc
+ if not self.module.check_mode:
+ res = self.cs.deleteVPC(id=vpc['id'])
+ if 'errortext' in res:
+ self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+
+ poll_async = self.module.params.get('poll_async')
+ if poll_async:
+ self.poll_job(res, 'vpc')
+ return vpc
+
+
+def main():
+ argument_spec=cs_argument_spec()
+ argument_spec.update(dict(
+ name=dict(required=True),
+ cidr=dict(default=None),
+ display_text=dict(default=None),
+ vpc_offering=dict(default=None),
+ network_domain=dict(default=None),
+ state=dict(choices=['present', 'absent', 'restarted'], default='present'),
+ domain=dict(default=None),
+ account=dict(default=None),
+ project=dict(default=None),
+ zone=dict(default=None),
+ tags=dict(type='list', aliases=['tag'], default=None),
+ poll_async=dict(type='bool', default=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ required_if=[
+ ('state', 'present', ['cidr']),
+ ],
+ supports_check_mode=True,
+ )
+
+ try:
+ acs_vpc = AnsibleCloudStackVpc(module)
+
+ state = module.params.get('state')
+ if state == 'absent':
+ vpc = acs_vpc.absent_vpc()
+ elif state == 'restarted':
+ vpc = acs_vpc.restart_vpc()
+ else:
+ vpc = acs_vpc.present_vpc()
+
+ result = acs_vpc.get_result(vpc)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_zone.py b/lib/ansible/modules/cloud/cloudstack/cs_zone.py
new file mode 100644
index 0000000000..1dd5dd6422
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_zone.py
@@ -0,0 +1,406 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_zone
+short_description: Manages zones on Apache CloudStack based clouds.
+description:
+ - Create, update and remove zones.
+version_added: "2.1"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the zone.
+ required: true
+ id:
+ description:
+ - uuid of the exising zone.
+ default: null
+ required: false
+ state:
+ description:
+ - State of the zone.
+ required: false
+ default: 'present'
+ choices: [ 'present', 'enabled', 'disabled', 'absent' ]
+ domain:
+ description:
+ - Domain the zone is related to.
+ - Zone is a public zone if not set.
+ required: false
+ default: null
+ network_domain:
+ description:
+ - Network domain for the zone.
+ required: false
+ default: null
+ network_type:
+ description:
+ - Network type of the zone.
+ required: false
+ default: basic
+ choices: [ 'basic', 'advanced' ]
+ dns1:
+ description:
+ - First DNS for the zone.
+ - Required if C(state=present)
+ required: false
+ default: null
+ dns2:
+ description:
+ - Second DNS for the zone.
+ required: false
+ default: null
+ internal_dns1:
+ description:
+ - First internal DNS for the zone.
+ - If not set C(dns1) will be used on C(state=present).
+ required: false
+ default: null
+ internal_dns2:
+ description:
+ - Second internal DNS for the zone.
+ required: false
+ default: null
+ dns1_ipv6:
+ description:
+ - First DNS for IPv6 for the zone.
+ required: false
+ default: null
+ dns2_ipv6:
+ description:
+ - Second DNS for IPv6 for the zone.
+ required: false
+ default: null
+ guest_cidr_address:
+ description:
+ - Guest CIDR address for the zone.
+ required: false
+ default: null
+ dhcp_provider:
+ description:
+ - DHCP provider for the Zone.
+ required: false
+ default: null
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+# Ensure a zone is present
+- local_action:
+ module: cs_zone
+ name: ch-zrh-ix-01
+ dns1: 8.8.8.8
+ dns2: 8.8.4.4
+ network_type: basic
+
+# Ensure a zone is disabled
+- local_action:
+ module: cs_zone
+ name: ch-zrh-ix-01
+ state: disabled
+
+# Ensure a zone is enabled
+- local_action:
+ module: cs_zone
+ name: ch-zrh-ix-01
+ state: enabled
+
+# Ensure a zone is absent
+- local_action:
+ module: cs_zone
+ name: ch-zrh-ix-01
+ state: absent
+'''
+
+RETURN = '''
+---
+id:
+ description: UUID of the zone.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+name:
+ description: Name of the zone.
+ returned: success
+ type: string
+ sample: zone01
+dns1:
+ description: First DNS for the zone.
+ returned: success
+ type: string
+ sample: 8.8.8.8
+dns2:
+ description: Second DNS for the zone.
+ returned: success
+ type: string
+ sample: 8.8.4.4
+internal_dns1:
+ description: First internal DNS for the zone.
+ returned: success
+ type: string
+ sample: 8.8.8.8
+internal_dns2:
+ description: Second internal DNS for the zone.
+ returned: success
+ type: string
+ sample: 8.8.4.4
+dns1_ipv6:
+ description: First IPv6 DNS for the zone.
+ returned: success
+ type: string
+ sample: "2001:4860:4860::8888"
+dns2_ipv6:
+ description: Second IPv6 DNS for the zone.
+ returned: success
+ type: string
+ sample: "2001:4860:4860::8844"
+allocation_state:
+ description: State of the zone.
+ returned: success
+ type: string
+ sample: Enabled
+domain:
+ description: Domain the zone is related to.
+ returned: success
+ type: string
+ sample: ROOT
+network_domain:
+ description: Network domain for the zone.
+ returned: success
+ type: string
+ sample: example.com
+network_type:
+ description: Network type for the zone.
+ returned: success
+ type: string
+ sample: basic
+local_storage_enabled:
+ description: Local storage offering enabled.
+ returned: success
+ type: bool
+ sample: false
+securitygroups_enabled:
+ description: Security groups support is enabled.
+ returned: success
+ type: bool
+ sample: false
+guest_cidr_address:
+ description: Guest CIDR address for the zone
+ returned: success
+ type: string
+ sample: 10.1.1.0/24
+dhcp_provider:
+ description: DHCP provider for the zone
+ returned: success
+ type: string
+ sample: VirtualRouter
+zone_token:
+ description: Zone token
+ returned: success
+ type: string
+ sample: ccb0a60c-79c8-3230-ab8b-8bdbe8c45bb7
+tags:
+ description: List of resource tags associated with the zone.
+ returned: success
+ type: dict
+ sample: [ { "key": "foo", "value": "bar" } ]
+'''
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackZone(AnsibleCloudStack):
+    """Implements create, update, enable/disable and delete of CloudStack zones."""
+
+    def __init__(self, module):
+        super(AnsibleCloudStackZone, self).__init__(module)
+        # Map CloudStack API response keys to the snake_case keys this module returns.
+        self.returns = {
+            'dns1': 'dns1',
+            'dns2': 'dns2',
+            'internaldns1': 'internal_dns1',
+            'internaldns2': 'internal_dns2',
+            'ipv6dns1': 'dns1_ipv6',
+            'ipv6dns2': 'dns2_ipv6',
+            'domain': 'network_domain',
+            'networktype': 'network_type',
+            'securitygroupsenabled': 'securitygroups_enabled',
+            'localstorageenabled': 'local_storage_enabled',
+            'guestcidraddress': 'guest_cidr_address',
+            'dhcpprovider': 'dhcp_provider',
+            'allocationstate': 'allocation_state',
+            'zonetoken': 'zone_token',
+        }
+        # Lazily filled zone cache, populated by get_zone().
+        self.zone = None
+
+
+    def _get_common_zone_args(self):
+        """Build the argument dict shared by the createZone and updateZone API calls."""
+        args = {}
+        args['name'] = self.module.params.get('name')
+        args['dns1'] = self.module.params.get('dns1')
+        args['dns2'] = self.module.params.get('dns2')
+        # Internal DNS servers default to the public ones when not set.
+        args['internaldns1'] = self.get_or_fallback('internal_dns1', 'dns1')
+        args['internaldns2'] = self.get_or_fallback('internal_dns2', 'dns2')
+        args['ipv6dns1'] = self.module.params.get('dns1_ipv6')
+        args['ipv6dns2'] = self.module.params.get('dns2_ipv6')
+        args['networktype'] = self.module.params.get('network_type')
+        args['domain'] = self.module.params.get('network_domain')
+        args['localstorageenabled'] = self.module.params.get('local_storage_enabled')
+        args['guestcidraddress'] = self.module.params.get('guest_cidr_address')
+        args['dhcpprovider'] = self.module.params.get('dhcp_provider')
+        state = self.module.params.get('state')
+        # The API expects capitalized 'Enabled'/'Disabled' in allocationstate.
+        if state in [ 'enabled', 'disabled']:
+            args['allocationstate'] = state.capitalize()
+        return args
+
+
+    def get_zone(self):
+        """Look up the zone by UUID first (if given), then by name; caches the result.
+
+        NOTE(review): if an 'id' was given but matched nothing, args still carries
+        that id into the name lookup below — confirm this fall-through is intended.
+        """
+        if not self.zone:
+            args = {}
+
+            uuid = self.module.params.get('id')
+            if uuid:
+                args['id'] = uuid
+                zones = self.cs.listZones(**args)
+                if zones:
+                    self.zone = zones['zone'][0]
+                    return self.zone
+
+            args['name'] = self.module.params.get('name')
+            zones = self.cs.listZones(**args)
+            if zones:
+                self.zone = zones['zone'][0]
+        return self.zone
+
+
+    def present_zone(self):
+        """Ensure the zone exists; update it when present, create it otherwise."""
+        zone = self.get_zone()
+        if zone:
+            zone = self._update_zone()
+        else:
+            zone = self._create_zone()
+        return zone
+
+
+    def _create_zone(self):
+        """Create the zone; 'dns1' is mandatory for creation. Returns None in check mode."""
+        required_params = [
+            'dns1',
+        ]
+        self.module.fail_on_missing_params(required_params=required_params)
+
+        self.result['changed'] = True
+
+        args = self._get_common_zone_args()
+        args['domainid'] = self.get_domain(key='id')
+        # Note the API key is singular here ('securitygroupenabled'), unlike the
+        # plural response key mapped in self.returns.
+        args['securitygroupenabled'] = self.module.params.get('securitygroups_enabled')
+
+        zone = None
+        if not self.module.check_mode:
+            res = self.cs.createZone(**args)
+            if 'errortext' in res:
+                self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+            zone = res['zone']
+        return zone
+
+
+    def _update_zone(self):
+        """Update the existing zone when any common argument differs."""
+        zone = self.get_zone()
+
+        args = self._get_common_zone_args()
+        args['id'] = zone['id']
+
+        if self.has_changed(args, zone):
+            self.result['changed'] = True
+
+            if not self.module.check_mode:
+                res = self.cs.updateZone(**args)
+                if 'errortext' in res:
+                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+                zone = res['zone']
+        return zone
+
+
+    def absent_zone(self):
+        """Delete the zone if it exists; returns the pre-deletion zone dict."""
+        zone = self.get_zone()
+        if zone:
+            self.result['changed'] = True
+
+            args = {}
+            args['id'] = zone['id']
+
+            if not self.module.check_mode:
+                res = self.cs.deleteZone(**args)
+                if 'errortext' in res:
+                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
+        return zone
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ id = dict(default=None),
+ name = dict(required=True),
+ dns1 = dict(default=None),
+ dns2 = dict(default=None),
+ internal_dns1 = dict(default=None),
+ internal_dns2 = dict(default=None),
+ dns1_ipv6 = dict(default=None),
+ dns2_ipv6 = dict(default=None),
+ network_type = dict(default='basic', choices=['Basic', 'basic', 'Advanced', 'advanced']),
+ network_domain = dict(default=None),
+ guest_cidr_address = dict(default=None),
+ dhcp_provider = dict(default=None),
+ local_storage_enabled = dict(default=None),
+ securitygroups_enabled = dict(default=None),
+ state = dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'),
+ domain = dict(default=None),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=cs_required_together(),
+ supports_check_mode=True
+ )
+
+ try:
+ acs_zone = AnsibleCloudStackZone(module)
+
+ state = module.params.get('state')
+ if state in ['absent']:
+ zone = acs_zone.absent_zone()
+ else:
+ zone = acs_zone.present_zone()
+
+ result = acs_zone.get_result(zone)
+
+ except CloudStackException as e:
+ module.fail_json(msg='CloudStackException: %s' % str(e))
+
+ module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_zone_facts.py b/lib/ansible/modules/cloud/cloudstack/cs_zone_facts.py
new file mode 100644
index 0000000000..74894b7494
--- /dev/null
+++ b/lib/ansible/modules/cloud/cloudstack/cs_zone_facts.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (c) 2016, René Moser <mail@renemoser.net>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['stableinterface'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: cs_zone_facts
+short_description: Gathering facts of zones from Apache CloudStack based clouds.
+description:
+ - Gathering facts from the API of a zone.
+version_added: "2.1"
+author: "René Moser (@resmo)"
+options:
+ name:
+ description:
+ - Name of the zone.
+ required: true
+extends_documentation_fragment: cloudstack
+'''
+
+EXAMPLES = '''
+- cs_zone_facts:
+ name: ch-gva-1
+ delegate_to: localhost
+
+- debug:
+ var: cloudstack_zone
+'''
+
+RETURN = '''
+---
+cloudstack_zone.id:
+ description: UUID of the zone.
+ returned: success
+ type: string
+ sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
+cloudstack_zone.name:
+ description: Name of the zone.
+ returned: success
+ type: string
+ sample: zone01
+cloudstack_zone.dns1:
+ description: First DNS for the zone.
+ returned: success
+ type: string
+ sample: 8.8.8.8
+cloudstack_zone.dns2:
+ description: Second DNS for the zone.
+ returned: success
+ type: string
+ sample: 8.8.4.4
+cloudstack_zone.internal_dns1:
+ description: First internal DNS for the zone.
+ returned: success
+ type: string
+ sample: 8.8.8.8
+cloudstack_zone.internal_dns2:
+ description: Second internal DNS for the zone.
+ returned: success
+ type: string
+ sample: 8.8.4.4
+cloudstack_zone.dns1_ipv6:
+ description: First IPv6 DNS for the zone.
+ returned: success
+ type: string
+ sample: "2001:4860:4860::8888"
+cloudstack_zone.dns2_ipv6:
+ description: Second IPv6 DNS for the zone.
+ returned: success
+ type: string
+ sample: "2001:4860:4860::8844"
+cloudstack_zone.allocation_state:
+ description: State of the zone.
+ returned: success
+ type: string
+ sample: Enabled
+cloudstack_zone.domain:
+ description: Domain the zone is related to.
+ returned: success
+ type: string
+ sample: ROOT
+cloudstack_zone.network_domain:
+ description: Network domain for the zone.
+ returned: success
+ type: string
+ sample: example.com
+cloudstack_zone.network_type:
+ description: Network type for the zone.
+ returned: success
+ type: string
+ sample: basic
+cloudstack_zone.local_storage_enabled:
+ description: Local storage offering enabled.
+ returned: success
+ type: bool
+ sample: false
+cloudstack_zone.securitygroups_enabled:
+ description: Security groups support is enabled.
+ returned: success
+ type: bool
+ sample: false
+cloudstack_zone.guest_cidr_address:
+ description: Guest CIDR address for the zone
+ returned: success
+ type: string
+ sample: 10.1.1.0/24
+cloudstack_zone.dhcp_provider:
+ description: DHCP provider for the zone
+ returned: success
+ type: string
+ sample: VirtualRouter
+cloudstack_zone.zone_token:
+ description: Zone token
+ returned: success
+ type: string
+ sample: ccb0a60c-79c8-3230-ab8b-8bdbe8c45bb7
+cloudstack_zone.tags:
+ description: List of resource tags associated with the zone.
+ returned: success
+ type: dict
+ sample: [ { "key": "foo", "value": "bar" } ]
+'''
+
+import base64
+
+# import cloudstack common
+from ansible.module_utils.cloudstack import *
+
+class AnsibleCloudStackZoneFacts(AnsibleCloudStack):
+    """Gathers facts about a single CloudStack zone (read-only)."""
+
+    def __init__(self, module):
+        super(AnsibleCloudStackZoneFacts, self).__init__(module)
+        # Map CloudStack API response keys to the snake_case fact keys.
+        self.returns = {
+            'dns1': 'dns1',
+            'dns2': 'dns2',
+            'internaldns1': 'internal_dns1',
+            'internaldns2': 'internal_dns2',
+            'ipv6dns1': 'dns1_ipv6',
+            'ipv6dns2': 'dns2_ipv6',
+            'domain': 'network_domain',
+            'networktype': 'network_type',
+            'securitygroupsenabled': 'securitygroups_enabled',
+            'localstorageenabled': 'local_storage_enabled',
+            'guestcidraddress': 'guest_cidr_address',
+            'dhcpprovider': 'dhcp_provider',
+            'allocationstate': 'allocation_state',
+            'zonetoken': 'zone_token',
+        }
+        self.facts = {
+            'cloudstack_zone': None,
+        }
+
+
+    def get_zone(self):
+        """Resolve the zone named by the 'name' param via the base-class lookup.
+
+        NOTE(review): relies on self.zone being initialized by the parent class
+        (AnsibleCloudStack) — not visible here, confirm before refactoring.
+        """
+        if not self.zone:
+            # TODO: add param key signature in get_zone()
+            # The base-class lookup reads the 'zone' param, so alias 'name' onto it.
+            self.module.params['zone'] = self.module.params.get('name')
+            super(AnsibleCloudStackZoneFacts, self).get_zone()
+        return self.zone
+
+
+    def run(self):
+        """Collect the zone facts and return them under the 'cloudstack_zone' key."""
+        zone = self.get_zone()
+        self.facts['cloudstack_zone'] = self.get_result(zone)
+        return self.facts
+
+
+def main():
+ argument_spec = cs_argument_spec()
+ argument_spec.update(dict(
+ name = dict(required=True),
+ ))
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=False,
+ )
+
+ cs_zone_facts = AnsibleCloudStackZoneFacts(module=module).run()
+ cs_facts_result = dict(changed=False, ansible_facts=cs_zone_facts)
+ module.exit_json(**cs_facts_result)
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/google/gcdns_record.py b/lib/ansible/modules/cloud/google/gcdns_record.py
new file mode 100644
index 0000000000..7c209c5cba
--- /dev/null
+++ b/lib/ansible/modules/cloud/google/gcdns_record.py
@@ -0,0 +1,794 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 CallFire Inc.
+#
+# This file is part of Ansible.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: gcdns_record
+short_description: Creates or removes resource records in Google Cloud DNS
+description:
+ - Creates or removes resource records in Google Cloud DNS.
+version_added: "2.2"
+author: "William Albert (@walbert947)"
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+options:
+ state:
+ description:
+ - Whether the given resource record should or should not be present.
+ required: false
+ choices: ["present", "absent"]
+ default: "present"
+ record:
+ description:
+ - The fully-qualified domain name of the resource record.
+ required: true
+ aliases: ['name']
+ zone:
+ description:
+ - The DNS domain name of the zone (e.g., example.com).
+ - One of either I(zone) or I(zone_id) must be specified as an
+ option, or the module will fail.
+ - If both I(zone) and I(zone_id) are specifed, I(zone_id) will be
+ used.
+ required: false
+ zone_id:
+ description:
+ - The Google Cloud ID of the zone (e.g., example-com).
+ - One of either I(zone) or I(zone_id) must be specified as an
+ option, or the module will fail.
+ - These usually take the form of domain names with the dots replaced
+ with dashes. A zone ID will never have any dots in it.
+ - I(zone_id) can be faster than I(zone) in projects with a large
+ number of zones.
+ - If both I(zone) and I(zone_id) are specifed, I(zone_id) will be
+ used.
+ required: false
+ type:
+ description:
+ - The type of resource record to add.
+ required: true
+ choices: [ 'A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR' ]
+ record_data:
+ description:
+ - The record_data to use for the resource record.
+ - I(record_data) must be specified if I(state) is C(present) or
+ I(overwrite) is C(True), or the module will fail.
+ - Valid record_data vary based on the record's I(type). In addition,
+ resource records that contain a DNS domain name in the value
+ field (e.g., CNAME, PTR, SRV, .etc) MUST include a trailing dot
+ in the value.
+ - Individual string record_data for TXT records must be enclosed in
+ double quotes.
+ - For resource records that have the same name but different
+ record_data (e.g., multiple A records), they must be defined as
+ multiple list entries in a single record.
+ required: false
+ aliases: ['value']
+ ttl:
+ description:
+ - The amount of time in seconds that a resource record will remain
+ cached by a caching resolver.
+ required: false
+ default: 300
+ overwrite:
+ description:
+ - Whether an attempt to overwrite an existing record should succeed
+ or fail. The behavior of this option depends on I(state).
+ - If I(state) is C(present) and I(overwrite) is C(True), this
+ module will replace an existing resource record of the same name
+ with the provided I(record_data). If I(state) is C(present) and
+ I(overwrite) is C(False), this module will fail if there is an
+ existing resource record with the same name and type, but
+ different resource data.
+ - If I(state) is C(absent) and I(overwrite) is C(True), this
+ module will remove the given resource record unconditionally.
+ If I(state) is C(absent) and I(overwrite) is C(False), this
+ module will fail if the provided record_data do not match exactly
+ with the existing resource record's record_data.
+ required: false
+ choices: [True, False]
+ default: False
+ service_account_email:
+ description:
+ - The e-mail address for a service account with access to Google
+ Cloud DNS.
+ required: false
+ default: null
+ pem_file:
+ description:
+ - The path to the PEM file associated with the service account
+ email.
+ - This option is deprecated and may be removed in a future release.
+ Use I(credentials_file) instead.
+ required: false
+ default: null
+ credentials_file:
+ description:
+ - The path to the JSON file associated with the service account
+ email.
+ required: false
+ default: null
+ project_id:
+ description:
+ - The Google Cloud Platform project ID to use.
+ required: false
+ default: null
+notes:
+ - See also M(gcdns_zone).
+ - This modules's underlying library does not support in-place updates for
+ DNS resource records. Instead, resource records are quickly deleted and
+ recreated.
+ - SOA records are technically supported, but their functionality is limited
+ to verifying that a zone's existing SOA record matches a pre-determined
+ value. The SOA record cannot be updated.
+ - Root NS records cannot be updated.
+ - NAPTR records are not supported.
+'''
+
+EXAMPLES = '''
+# Create an A record.
+- gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: A
+ value: '1.2.3.4'
+
+# Update an existing record.
+- gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: A
+ overwrite: true
+ value: '5.6.7.8'
+
+# Remove an A record.
+- gcdns_record:
+ record: 'www1.example.com'
+ zone_id: 'example-com'
+ state: absent
+ type: A
+ value: '5.6.7.8'
+
+# Create a CNAME record.
+- gcdns_record:
+ record: 'www.example.com'
+ zone_id: 'example-com'
+ type: CNAME
+ value: 'www.example.com.' # Note the trailing dot
+
+# Create an MX record with a custom TTL.
+- gcdns_record:
+ record: 'example.com'
+ zone: 'example.com'
+ type: MX
+ ttl: 3600
+ value: '10 mail.example.com.' # Note the trailing dot
+
+# Create multiple A records with the same name.
+- gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ type: A
+ record_data:
+ - '192.0.2.23'
+ - '10.4.5.6'
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+# Change the value of an existing record with multiple record_data.
+- gcdns_record:
+ record: 'api.example.com'
+ zone: 'example.com'
+ type: A
+ overwrite: true
+ record_data: # WARNING: All values in a record will be replaced
+ - '192.0.2.23'
+ - '192.0.2.42' # The changed record
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+# Safely remove a multi-line record.
+- gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ state: absent
+ type: A
+ record_data: # NOTE: All of the values must match exactly
+ - '192.0.2.23'
+ - '192.0.2.42'
+ - '198.51.100.5'
+ - '203.0.113.10'
+
+# Unconditionally remove a record.
+- gcdns_record:
+ record: 'api.example.com'
+ zone_id: 'example-com'
+ state: absent
+ overwrite: true # overwrite is true, so no values are needed
+ type: A
+
+# Create an AAAA record
+- gcdns_record:
+ record: 'www1.example.com'
+ zone: 'example.com'
+ type: AAAA
+ value: 'fd00:db8::1'
+
+# Create a PTR record
+- gcdns_record:
+ record: '10.5.168.192.in-addr.arpa'
+ zone: '5.168.192.in-addr.arpa'
+ type: PTR
+ value: 'api.example.com.' # Note the trailing dot.
+
+# Create an NS record
+- gcdns_record:
+ record: 'subdomain.example.com'
+ zone: 'example.com'
+ type: NS
+ ttl: 21600
+ record_data:
+ - 'ns-cloud-d1.googledomains.com.' # Note the trailing dots on values
+ - 'ns-cloud-d2.googledomains.com.'
+ - 'ns-cloud-d3.googledomains.com.'
+ - 'ns-cloud-d4.googledomains.com.'
+
+# Create a TXT record
+- gcdns_record:
+ record: 'example.com'
+ zone_id: 'example-com'
+ type: TXT
+ record_data:
+ - '"v=spf1 include:_spf.google.com -all"' # A single-string TXT value
+ - '"hello " "world"' # A multi-string TXT value
+'''
+
+RETURN = '''
+overwrite:
+ description: Whether to the module was allowed to overwrite the record
+ returned: success
+ type: boolean
+ sample: True
+record:
+ description: Fully-qualified domain name of the resource record
+ returned: success
+ type: string
+ sample: mail.example.com.
+state:
+ description: Whether the record is present or absent
+ returned: success
+ type: string
+ sample: present
+ttl:
+ description: The time-to-live of the resource record
+ returned: success
+ type: int
+ sample: 300
+type:
+ description: The type of the resource record
+ returned: success
+ type: string
+ sample: A
+record_data:
+ description: The resource record values
+ returned: success
+ type: list
+ sample: ['5.6.7.8', '9.10.11.12']
+zone:
+ description: The dns name of the zone
+ returned: success
+ type: string
+ sample: example.com.
+zone_id:
+ description: The Google Cloud DNS ID of the zone
+ returned: success
+ type: string
+ sample: example-com
+'''
+
+
+################################################################################
+# Imports
+################################################################################
+
+import socket
+from distutils.version import LooseVersion
+
+try:
+ from libcloud import __version__ as LIBCLOUD_VERSION
+ from libcloud.common.google import InvalidRequestError
+ from libcloud.common.types import LibcloudError
+ from libcloud.dns.types import Provider
+ from libcloud.dns.types import RecordDoesNotExistError
+ from libcloud.dns.types import ZoneDoesNotExistError
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+
+################################################################################
+# Constants
+################################################################################
+
+# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS
+# v1 API. Earlier versions contained the beta v1 API, which has since been
+# deprecated and decommissioned.
+MINIMUM_LIBCLOUD_VERSION = '0.19.0'
+
+# The libcloud Google Cloud DNS provider.
+PROVIDER = Provider.GOOGLE
+
+# The records that libcloud's Google Cloud DNS provider supports.
+#
+# Libcloud has a RECORD_TYPE_MAP dictionary in the provider that also contains
+# this information and is the authoritative source on which records are
+# supported, but accessing the dictionary requires creating a Google Cloud DNS
+# driver object, which is done in a helper module.
+#
+# I'm hard-coding the supported record types here, because they (hopefully!)
+# shouldn't change much, and it allows me to use it as a "choices" parameter
+# in an AnsibleModule argument_spec.
+SUPPORTED_RECORD_TYPES = [ 'A', 'AAAA', 'CNAME', 'SRV', 'TXT', 'SOA', 'NS', 'MX', 'SPF', 'PTR' ]
+
+
+################################################################################
+# Functions
+################################################################################
+
+def create_record(module, gcdns, zone, record):
+    """Creates or overwrites a resource record.
+
+    `record` is the pre-fetched existing record object, or None when no
+    record of this name/type exists. Returns False when no change is
+    needed, True when a change was (or, in check mode, would be) made.
+    Exits via module.fail_json() on known API errors.
+    """
+
+    overwrite = module.boolean(module.params['overwrite'])
+    record_name = module.params['record']
+    record_type = module.params['type']
+    ttl = module.params['ttl']
+    record_data = module.params['record_data']
+    data = dict(ttl=ttl, rrdatas=record_data)
+
+    # Google Cloud DNS wants the trailing dot on all DNS names.
+    if record_name[-1] != '.':
+        record_name = record_name + '.'
+
+    # If we found a record, we need to check if the values match.
+    if record is not None:
+        # If the record matches, we obviously don't have to change anything.
+        if _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
+            return False
+
+        # The record doesn't match, so we need to check if we can overwrite it.
+        if not overwrite:
+            module.fail_json(
+                msg = 'cannot overwrite existing record, overwrite protection enabled',
+                changed = False
+            )
+
+    # The record either doesn't exist, or it exists and we can overwrite it.
+    # Note that in check mode neither branch below runs; we still fall
+    # through and return True to report that a change would be made.
+    if record is None and not module.check_mode:
+        # There's no existing record, so we'll just create it.
+        try:
+            gcdns.create_record(record_name, zone, record_type, data)
+        except InvalidRequestError as error:
+            if error.code == 'invalid':
+                # The resource record name and type are valid by themselves, but
+                # not when combined (e.g., an 'A' record with "www.example.com"
+                # as its value).
+                module.fail_json(
+                    msg = 'value is invalid for the given type: ' +
+                        "%s, got value: %s" % (record_type, record_data),
+                    changed = False
+                )
+
+            elif error.code == 'cnameResourceRecordSetConflict':
+                # We're attempting to create a CNAME resource record when we
+                # already have another type of resource record with the name
+                # domain name.
+                module.fail_json(
+                    msg = "non-CNAME resource record already exists: %s" % record_name,
+                    changed = False
+                )
+
+            else:
+                # The error is something else that we don't know how to handle,
+                # so we'll just re-raise the exception.
+                raise
+
+    elif record is not None and not module.check_mode:
+        # The Google provider in libcloud doesn't support updating a record in
+        # place, so if the record already exists, we need to delete it and
+        # recreate it using the new information.
+        gcdns.delete_record(record)
+
+        try:
+            gcdns.create_record(record_name, zone, record_type, data)
+        except InvalidRequestError:
+            # Something blew up when creating the record. This will usually be a
+            # result of invalid value data in the new record. Unfortunately, we
+            # already changed the state of the record by deleting the old one,
+            # so we'll try to roll back before failing out.
+            try:
+                # Re-create from the saved record object's own attributes.
+                gcdns.create_record(record.name, record.zone, record.type, record.data)
+                module.fail_json(
+                    msg = 'error updating record, the original record was restored',
+                    changed = False
+                )
+            except LibcloudError:
+                # We deleted the old record, couldn't create the new record, and
+                # couldn't roll back. That really sucks. We'll dump the original
+                # record to the failure output so the user can resore it if
+                # necessary.
+                module.fail_json(
+                    msg = 'error updating record, and could not restore original record, ' +
+                        "original name: %s " % record.name +
+                        "original zone: %s " % record.zone +
+                        "original type: %s " % record.type +
+                        "original data: %s" % record.data,
+                    changed = True)
+
+    return True
+
+
+def remove_record(module, gcdns, record):
+ """Remove a resource record."""
+
+ overwrite = module.boolean(module.params['overwrite'])
+ ttl = module.params['ttl']
+ record_data = module.params['record_data']
+
+ # If there is no record, we're obviously done.
+ if record is None:
+ return False
+
+ # If there is an existing record, do our values match the values of the
+ # existing record?
+ if not overwrite:
+ if not _records_match(record.data['ttl'], record.data['rrdatas'], ttl, record_data):
+ module.fail_json(
+ msg = 'cannot delete due to non-matching ttl or record_data: ' +
+ "ttl: %d, record_data: %s " % (ttl, record_data) +
+ "original ttl: %d, original record_data: %s" % (record.data['ttl'], record.data['rrdatas']),
+ changed = False
+ )
+
+ # If we got to this point, we're okay to delete the record.
+ if not module.check_mode:
+ gcdns.delete_record(record)
+
+ return True
+
+
+def _get_record(gcdns, zone, record_type, record_name):
+ """Gets the record object for a given FQDN."""
+
+ # The record ID is a combination of its type and FQDN. For example, the
+ # ID of an A record for www.example.com would be 'A:www.example.com.'
+ record_id = "%s:%s" % (record_type, record_name)
+
+ try:
+ return gcdns.get_record(zone.id, record_id)
+ except RecordDoesNotExistError:
+ return None
+
+
+def _get_zone(gcdns, zone_name, zone_id):
+ """Gets the zone object for a given domain name."""
+
+ if zone_id is not None:
+ try:
+ return gcdns.get_zone(zone_id)
+ except ZoneDoesNotExistError:
+ return None
+
+ # To create a zone, we need to supply a domain name. However, to delete a
+ # zone, we need to supply a zone ID. Zone ID's are often based on domain
+ # names, but that's not guaranteed, so we'll iterate through the list of
+ # zones to see if we can find a matching domain name.
+ available_zones = gcdns.iterate_zones()
+ found_zone = None
+
+ for zone in available_zones:
+ if zone.domain == zone_name:
+ found_zone = zone
+ break
+
+ return found_zone
+
+
+def _records_match(old_ttl, old_record_data, new_ttl, new_record_data):
+ """Checks to see if original and new TTL and values match."""
+
+ matches = True
+
+ if old_ttl != new_ttl:
+ matches = False
+ if old_record_data != new_record_data:
+ matches = False
+
+ return matches
+
+
+def _sanity_check(module):
+ """Run sanity checks that don't depend on info from the zone/record."""
+
+ overwrite = module.params['overwrite']
+ record_name = module.params['record']
+ record_type = module.params['type']
+ state = module.params['state']
+ ttl = module.params['ttl']
+ record_data = module.params['record_data']
+
+ # Apache libcloud needs to be installed and at least the minimum version.
+ if not HAS_LIBCLOUD:
+ module.fail_json(
+ msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed = False
+ )
+ elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
+ module.fail_json(
+ msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
+ changed = False
+ )
+
+ # A negative TTL is not permitted (how would they even work?!).
+ if ttl < 0:
+ module.fail_json(
+ msg = 'TTL cannot be less than zero, got: %d' % ttl,
+ changed = False
+ )
+
+ # Deleting SOA records is not permitted.
+ if record_type == 'SOA' and state == 'absent':
+ module.fail_json(msg='cannot delete SOA records', changed=False)
+
+ # Updating SOA records is not permitted.
+ if record_type == 'SOA' and state == 'present' and overwrite:
+ module.fail_json(msg='cannot update SOA records', changed=False)
+
+ # Some sanity checks depend on what value was supplied.
+ if record_data is not None and (state == 'present' or not overwrite):
+ # A records must contain valid IPv4 addresses.
+ if record_type == 'A':
+ for value in record_data:
+ try:
+ socket.inet_aton(value)
+ except socket.error:
+ module.fail_json(
+ msg = 'invalid A record value, got: %s' % value,
+ changed = False
+ )
+
+ # AAAA records must contain valid IPv6 addresses.
+ if record_type == 'AAAA':
+ for value in record_data:
+ try:
+ socket.inet_pton(socket.AF_INET6, value)
+ except socket.error:
+ module.fail_json(
+ msg = 'invalid AAAA record value, got: %s' % value,
+ changed = False
+ )
+
+ # CNAME and SOA records can't have multiple values.
+ if record_type in ['CNAME', 'SOA'] and len(record_data) > 1:
+ module.fail_json(
+ msg = 'CNAME or SOA records cannot have more than one value, ' +
+ "got: %s" % record_data,
+ changed = False
+ )
+
+ # Google Cloud DNS does not support wildcard NS records.
+ if record_type == 'NS' and record_name[0] == '*':
+ module.fail_json(
+ msg = "wildcard NS records not allowed, got: %s" % record_name,
+ changed = False
+ )
+
+ # Values for txt records must begin and end with a double quote.
+ if record_type == 'TXT':
+ for value in record_data:
+ if value[0] != '"' and value[-1] != '"':
+ module.fail_json(
+ msg = 'TXT record_data must be enclosed in double quotes, ' +
+ 'got: %s' % value,
+ changed = False
+ )
+
+
+def _additional_sanity_checks(module, zone):
+ """Run input sanity checks that depend on info from the zone/record."""
+
+ overwrite = module.params['overwrite']
+ record_name = module.params['record']
+ record_type = module.params['type']
+ state = module.params['state']
+
+ # CNAME records are not allowed to have the same name as the root domain.
+ if record_type == 'CNAME' and record_name == zone.domain:
+ module.fail_json(
+ msg = 'CNAME records cannot match the zone name',
+ changed = False
+ )
+
+ # The root domain must always have an NS record.
+ if record_type == 'NS' and record_name == zone.domain and state == 'absent':
+ module.fail_json(
+ msg = 'cannot delete root NS records',
+ changed = False
+ )
+
+ # Updating NS records with the name as the root domain is not allowed
+ # because libcloud does not support in-place updates and root domain NS
+ # records cannot be removed.
+ if record_type == 'NS' and record_name == zone.domain and overwrite:
+ module.fail_json(
+ msg = 'cannot update existing root NS records',
+ changed = False
+ )
+
+ # SOA records with names that don't match the root domain are not permitted
+ # (and wouldn't make sense anyway).
+ if record_type == 'SOA' and record_name != zone.domain:
+ module.fail_json(
+ msg = 'non-root SOA records are not permitted, got: %s' % record_name,
+ changed = False
+ )
+
+
+################################################################################
+# Main
+################################################################################
+
+def main():
+    """Main function: validate input, then create or remove the record.
+
+    Exits via module.exit_json() with a changed flag and an Ansible diff,
+    or via module.fail_json() on validation/API errors.
+    """
+
+    module = AnsibleModule(
+        argument_spec = dict(
+            state = dict(default='present', choices=['present', 'absent'], type='str'),
+            record = dict(required=True, aliases=['name'], type='str'),
+            zone = dict(type='str'),
+            zone_id = dict(type='str'),
+            type = dict(required=True, choices=SUPPORTED_RECORD_TYPES, type='str'),
+            record_data = dict(aliases=['value'], type='list'),
+            ttl = dict(default=300, type='int'),
+            overwrite = dict(default=False, type='bool'),
+            service_account_email = dict(type='str'),
+            pem_file = dict(type='path'),
+            credentials_file = dict(type='path'),
+            project_id = dict(type='str')
+        ),
+        # record_data is mandatory when creating, and also when deleting
+        # with overwrite off (the values must be compared exactly then).
+        required_if = [
+            ('state', 'present', ['record_data']),
+            ('overwrite', False, ['record_data'])
+        ],
+        required_one_of = [['zone', 'zone_id']],
+        supports_check_mode = True
+    )
+
+    # Checks that need no zone/record info (libcloud version, TTL, etc.).
+    _sanity_check(module)
+
+    record_name = module.params['record']
+    record_type = module.params['type']
+    state = module.params['state']
+    ttl = module.params['ttl']
+    zone_name = module.params['zone']
+    zone_id = module.params['zone_id']
+
+    # Echo the request back to the user; zone/zone_id are replaced with
+    # the authoritative values once the zone has been looked up.
+    json_output = dict(
+        state = state,
+        record = record_name,
+        zone = zone_name,
+        zone_id = zone_id,
+        type = record_type,
+        record_data = module.params['record_data'],
+        ttl = ttl,
+        overwrite = module.boolean(module.params['overwrite'])
+    )
+
+    # Google Cloud DNS wants the trailing dot on all DNS names.
+    if zone_name is not None and zone_name[-1] != '.':
+        zone_name = zone_name + '.'
+    if record_name[-1] != '.':
+        record_name = record_name + '.'
+
+    # Build a connection object that we can use to connect with Google
+    # Cloud DNS.
+    gcdns = gcdns_connect(module, provider=PROVIDER)
+
+    # We need to check that the zone we're creating a record for actually
+    # exists.
+    zone = _get_zone(gcdns, zone_name, zone_id)
+    if zone is None and zone_name is not None:
+        module.fail_json(
+            msg = 'zone name was not found: %s' % zone_name,
+            changed = False
+        )
+    elif zone is None and zone_id is not None:
+        module.fail_json(
+            msg = 'zone id was not found: %s' % zone_id,
+            changed = False
+        )
+
+    # Populate the returns with the actual zone information.
+    json_output['zone'] = zone.domain
+    json_output['zone_id'] = zone.id
+
+    # We also need to check if the record we want to create or remove
+    # actually exists.
+    try:
+        record = _get_record(gcdns, zone, record_type, record_name)
+    except InvalidRequestError:
+        # We gave Google Cloud DNS an invalid DNS record name.
+        module.fail_json(
+            msg = 'record name is invalid: %s' % record_name,
+            changed = False
+        )
+
+    # Checks that need the live zone object (apex CNAME/NS/SOA rules).
+    _additional_sanity_checks(module, zone)
+
+    diff = dict()
+
+    # Build the 'before' diff from the existing record, if any.
+    if record is None:
+        diff['before'] = ''
+        diff['before_header'] = '<absent>'
+    else:
+        diff['before'] = dict(
+            record = record.data['name'],
+            type = record.data['type'],
+            record_data = record.data['rrdatas'],
+            ttl = record.data['ttl']
+        )
+        diff['before_header'] = "%s:%s" % (record_type, record_name)
+
+    # Create, remove, or modify the record.
+    if state == 'present':
+        diff['after'] = dict(
+            record = record_name,
+            type = record_type,
+            record_data = module.params['record_data'],
+            ttl = ttl
+        )
+        diff['after_header'] = "%s:%s" % (record_type, record_name)
+
+        changed = create_record(module, gcdns, zone, record)
+
+    elif state == 'absent':
+        diff['after'] = ''
+        diff['after_header'] = '<absent>'
+
+        changed = remove_record(module, gcdns, record)
+
+    module.exit_json(changed=changed, diff=diff, **json_output)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.gcdns import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/google/gcdns_zone.py b/lib/ansible/modules/cloud/google/gcdns_zone.py
new file mode 100644
index 0000000000..683cb88189
--- /dev/null
+++ b/lib/ansible/modules/cloud/google/gcdns_zone.py
@@ -0,0 +1,385 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 CallFire Inc.
+#
+# This file is part of Ansible.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+################################################################################
+# Documentation
+################################################################################
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: gcdns_zone
+short_description: Creates or removes zones in Google Cloud DNS
+description:
+ - Creates or removes managed zones in Google Cloud DNS.
+version_added: "2.2"
+author: "William Albert (@walbert947)"
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud >= 0.19.0"
+options:
+ state:
+ description:
+ - Whether the given zone should or should not be present.
+ required: false
+ choices: ["present", "absent"]
+ default: "present"
+ zone:
+ description:
+ - The DNS domain name of the zone.
+ - This is NOT the Google Cloud DNS zone ID (e.g., example-com). If
+ you attempt to specify a zone ID, this module will attempt to
+ create a TLD and will fail.
+ required: true
+ aliases: ['name']
+ description:
+ description:
+ - An arbitrary text string to use for the zone description.
+ required: false
+ default: ""
+ service_account_email:
+ description:
+ - The e-mail address for a service account with access to Google
+ Cloud DNS.
+ required: false
+ default: null
+ pem_file:
+ description:
+ - The path to the PEM file associated with the service account
+ email.
+ - This option is deprecated and may be removed in a future release.
+ Use I(credentials_file) instead.
+ required: false
+ default: null
+ credentials_file:
+ description:
+ - The path to the JSON file associated with the service account
+ email.
+ required: false
+ default: null
+ project_id:
+ description:
+ - The Google Cloud Platform project ID to use.
+ required: false
+ default: null
+notes:
+ - See also M(gcdns_record).
+ - Zones that are newly created must still be set up with a domain registrar
+ before they can be used.
+'''
+
+EXAMPLES = '''
+# Basic zone creation example.
+- name: Create a basic zone with the minimum number of parameters.
+ gcdns_zone: zone=example.com
+
+# Zone removal example.
+- name: Remove a zone.
+ gcdns_zone: zone=example.com state=absent
+
+# Zone creation with description
+- name: Creating a zone with a description
+ gcdns_zone: zone=example.com description="This is an awesome zone"
+'''
+
+RETURN = '''
+description:
+ description: The zone's description
+ returned: success
+ type: string
+ sample: This is an awesome zone
+state:
+ description: Whether the zone is present or absent
+ returned: success
+ type: string
+ sample: present
+zone:
+ description: The zone's DNS name
+ returned: success
+ type: string
+ sample: example.com.
+'''
+
+
+################################################################################
+# Imports
+################################################################################
+
+from distutils.version import LooseVersion
+
+try:
+ from libcloud import __version__ as LIBCLOUD_VERSION
+ from libcloud.common.google import InvalidRequestError
+ from libcloud.common.google import ResourceExistsError
+ from libcloud.common.google import ResourceNotFoundError
+ from libcloud.dns.types import Provider
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+
+################################################################################
+# Constants
+################################################################################
+
+# Apache libcloud 0.19.0 was the first to contain the non-beta Google Cloud DNS
+# v1 API. Earlier versions contained the beta v1 API, which has since been
+# deprecated and decommissioned.
+MINIMUM_LIBCLOUD_VERSION = '0.19.0'
+
+# The libcloud Google Cloud DNS provider.
+PROVIDER = Provider.GOOGLE
+
+# The URL used to verify ownership of a zone in Google Cloud DNS.
+ZONE_VERIFICATION_URL= 'https://www.google.com/webmasters/verification/'
+
+################################################################################
+# Functions
+################################################################################
+
+def create_zone(module, gcdns, zone):
+    """Creates a new Google Cloud DNS zone.
+
+    `zone` is the pre-fetched existing zone object, or None when absent.
+    Returns False when the zone already exists, True when it was (or, in
+    check mode, would be) created. Exits via module.fail_json() on known
+    creation errors.
+    """
+
+    description = module.params['description']
+    extra = dict(description = description)
+    zone_name = module.params['zone']
+
+    # Google Cloud DNS wants the trailing dot on the domain name.
+    if zone_name[-1] != '.':
+        zone_name = zone_name + '.'
+
+    # If we got a zone back, then the domain exists.
+    if zone is not None:
+        return False
+
+    # The zone doesn't exist yet.
+    try:
+        if not module.check_mode:
+            gcdns.create_zone(domain=zone_name, extra=extra)
+        return True
+
+    except ResourceExistsError:
+        # The zone already exists. We checked for this already, so either
+        # Google is lying, or someone was a ninja and created the zone
+        # within milliseconds of us checking for its existence. In any case,
+        # the zone has already been created, so we have nothing more to do.
+        return False
+
+    except InvalidRequestError as error:
+        if error.code == 'invalid':
+            # The zone name or a parameter might be completely invalid. This is
+            # typically caused by an illegal DNS name (e.g. foo..com).
+            module.fail_json(
+                msg = "zone name is not a valid DNS name: %s" % zone_name,
+                changed = False
+            )
+
+        elif error.code == 'managedZoneDnsNameNotAvailable':
+            # Google Cloud DNS will refuse to create zones with certain domain
+            # names, such as TLDs, ccTLDs, or special domain names such as
+            # example.com.
+            module.fail_json(
+                msg = "zone name is reserved or already in use: %s" % zone_name,
+                changed = False
+            )
+
+        elif error.code == 'verifyManagedZoneDnsNameOwnership':
+            # This domain name needs to be verified before Google will create
+            # it. This occurs when a user attempts to create a zone which shares
+            # a domain name with a zone hosted elsewhere in Google Cloud DNS.
+            module.fail_json(
+                msg = "ownership of zone %s needs to be verified at %s" % (zone_name, ZONE_VERIFICATION_URL),
+                changed = False
+            )
+
+        else:
+            # The error is something else that we don't know how to handle,
+            # so we'll just re-raise the exception.
+            raise
+
+
+def remove_zone(module, gcdns, zone):
+    """Removes an existing Google Cloud DNS zone.
+
+    `zone` is the pre-fetched zone object, or None when absent. Returns
+    False when there is nothing to remove, True when the zone was (or, in
+    check mode, would be) deleted. Exits via module.fail_json() when the
+    zone still contains user records.
+    """
+
+    # If there's no zone, then we're obviously done.
+    if zone is None:
+        return False
+
+    # An empty zone will have two resource records:
+    # 1. An NS record with a list of authoritative name servers
+    # 2. An SOA record
+    # If any additional resource records are present, Google Cloud DNS will
+    # refuse to remove the zone.
+    if len(zone.list_records()) > 2:
+        module.fail_json(
+            msg = "zone is not empty and cannot be removed: %s" % zone.domain,
+            changed = False
+        )
+
+    try:
+        if not module.check_mode:
+            gcdns.delete_zone(zone)
+        return True
+
+    except ResourceNotFoundError:
+        # When we performed our check, the zone existed. It may have been
+        # deleted by something else. It's gone, so whatever.
+        return False
+
+    except InvalidRequestError as error:
+        if error.code == 'containerNotEmpty':
+            # When we performed our check, the zone existed and was empty. In
+            # the milliseconds between the check and the removal command,
+            # records were added to the zone.
+            module.fail_json(
+                msg = "zone is not empty and cannot be removed: %s" % zone.domain,
+                changed = False
+            )
+
+        else:
+            # The error is something else that we don't know how to handle,
+            # so we'll just re-raise the exception.
+            raise
+
+
def _get_zone(gcdns, zone_name):
    """Gets the zone object for a given domain name.

    :param gcdns: an initialized libcloud Google Cloud DNS driver.
    :param zone_name: fully-qualified domain name, including trailing dot.
    :returns: the matching zone object, or None if no zone matches.
    """

    # To create a zone, we need to supply a zone name. However, to delete a
    # zone, we need to supply a zone ID. Zone ID's are often based on zone
    # names, but that's not guaranteed, so we'll iterate through the list of
    # zones to see if we can find a matching name.
    available_zones = gcdns.iterate_zones()
    found_zone = None

    for zone in available_zones:
        if zone.domain == zone_name:
            found_zone = zone
            break

    # None when no zone in the account matches the requested domain.
    return found_zone
+
def _sanity_check(module):
    """Run module sanity checks.

    Fails the module run when Apache libcloud is missing or older than
    the minimum supported version, or when the requested zone name is a
    bare top-level domain (Google Cloud DNS cannot create TLDs).
    """

    zone_name = module.params['zone']

    # Apache libcloud needs to be installed and at least the minimum version.
    if not HAS_LIBCLOUD:
        module.fail_json(
            msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
            changed = False
        )
    # NOTE(review): this compares a LooseVersion against the raw
    # MINIMUM_LIBCLOUD_VERSION value; LooseVersion coerces a string
    # right-hand side, so this assumes that constant is a version string
    # -- confirm where it is defined.
    elif LooseVersion(LIBCLOUD_VERSION) < MINIMUM_LIBCLOUD_VERSION:
        module.fail_json(
            msg = 'This module requires Apache libcloud %s or greater' % MINIMUM_LIBCLOUD_VERSION,
            changed = False
        )

    # Google Cloud DNS does not support the creation of TLDs. A name with
    # no dot at all, or with a single non-empty label (e.g. 'com' or
    # 'com.'), is treated as a TLD.
    if '.' not in zone_name or len([label for label in zone_name.split('.') if label]) == 1:
        module.fail_json(
            msg = 'cannot create top-level domain: %s' % zone_name,
            changed = False
        )
+
+################################################################################
+# Main
+################################################################################
+
def main():
    """Main function: create or remove the requested Cloud DNS zone."""

    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default='present', choices=['present', 'absent'], type='str'),
            zone = dict(required=True, aliases=['name'], type='str'),
            description = dict(default='', type='str'),
            service_account_email = dict(type='str'),
            pem_file = dict(type='path'),
            credentials_file = dict(type='path'),
            project_id = dict(type='str')
        ),
        supports_check_mode = True
    )

    _sanity_check(module)

    zone_name = module.params['zone']
    state = module.params['state']

    # Google Cloud DNS wants the trailing dot on the domain name.
    if zone_name[-1] != '.':
        zone_name = zone_name + '.'

    # Values echoed back to the user in the module result.
    json_output = dict(
        state = state,
        zone = zone_name,
        description = module.params['description']
    )

    # Build a connection object that we can use to connect with Google
    # Cloud DNS.
    gcdns = gcdns_connect(module, provider=PROVIDER)

    # We need to check if the zone we're attempting to create already exists.
    zone = _get_zone(gcdns, zone_name)

    # Populated for Ansible's --diff mode: the zone's state before and
    # after this run.
    diff = dict()

    # Build the 'before' diff
    if zone is None:
        diff['before'] = ''
        diff['before_header'] = '<absent>'
    else:
        diff['before'] = dict(
            zone = zone.domain,
            description = zone.extra['description']
        )
        diff['before_header'] = zone_name

    # Create or remove the zone. 'state' is constrained to these two
    # choices by the argument spec, so 'changed' is always assigned.
    if state == 'present':
        diff['after'] = dict(
            zone = zone_name,
            description = module.params['description']
        )
        diff['after_header'] = zone_name

        changed = create_zone(module, gcdns, zone)

    elif state == 'absent':
        diff['after'] = ''
        diff['after_header'] = '<absent>'

        changed = remove_zone(module, gcdns, zone)

    module.exit_json(changed=changed, diff=diff, **json_output)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.gcdns import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/google/gce_img.py b/lib/ansible/modules/cloud/google/gce_img.py
new file mode 100644
index 0000000000..e340808539
--- /dev/null
+++ b/lib/ansible/modules/cloud/google/gce_img.py
@@ -0,0 +1,233 @@
+#!/usr/bin/python
+# Copyright 2015 Google Inc. All Rights Reserved.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+"""An Ansible module to utilize GCE image resources."""
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: gce_img
+version_added: "1.9"
+short_description: utilize GCE image resources
+description:
+ - This module can create and delete GCE private images from gzipped
+ compressed tarball containing raw disk data or from existing detached
+ disks in any zone. U(https://cloud.google.com/compute/docs/images)
+options:
+ name:
+ description:
+ - the name of the image to create or delete
+ required: true
+ default: null
+ description:
+ description:
+ - an optional description
+ required: false
+ default: null
+ family:
+ description:
+ - an optional family name
+ required: false
+ default: null
+ version_added: "2.2"
+ source:
+ description:
+ - the source disk or the Google Cloud Storage URI to create the image from
+ required: false
+ default: null
+ state:
+ description:
+ - desired state of the image
+ required: false
+ default: "present"
+ choices: ["present", "absent"]
+ zone:
+ description:
+ - the zone of the disk specified by source
+ required: false
+ default: "us-central1-a"
+ timeout:
+ description:
+ - timeout for the operation
+ required: false
+ default: 180
+ version_added: "2.0"
+ service_account_email:
+ description:
+ - service account email
+ required: false
+ default: null
+ pem_file:
+ description:
+ - path to the pem file associated with the service account email
+ required: false
+ default: null
+ project_id:
+ description:
+ - your GCE project ID
+ required: false
+ default: null
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud"
+author: "Tom Melendez (supertom)"
+'''
+
+EXAMPLES = '''
+# Create an image named test-image from the disk 'test-disk' in zone us-central1-a.
+- gce_img:
+ name: test-image
+ source: test-disk
+ zone: us-central1-a
+ state: present
+
+# Create an image named test-image from a tarball in Google Cloud Storage.
+- gce_img:
+ name: test-image
+ source: https://storage.googleapis.com/bucket/path/to/image.tgz
+
+# Alternatively use the gs scheme
+- gce_img:
+ name: test-image
+ source: gs://bucket/path/to/image.tgz
+
+# Delete an image named test-image.
+- gce_img:
+ name: test-image
+ state: absent
+'''
+
+
+try:
+ import libcloud
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError
+ from libcloud.common.google import ResourceExistsError
+ from libcloud.common.google import ResourceNotFoundError
+ _ = Provider.GCE
+ has_libcloud = True
+except ImportError:
+ has_libcloud = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.gce import gce_connect
+
+
GCS_URI = 'https://storage.googleapis.com/'


def create_image(gce, name, module):
    """Create a GCE image with the specified name.

    The image source may be the name of an attached disk, an
    https://storage.googleapis.com/ URI, or a gs:// URI pointing at a
    gzipped tarball of raw disk data.

    :returns: True when the image was created, False when an image with
        that name already exists. Invalid input aborts the module run
        via module.fail_json().
    """
    source = module.params.get('source')
    zone = module.params.get('zone')
    desc = module.params.get('description')
    timeout = module.params.get('timeout')
    family = module.params.get('family')

    if not source:
        module.fail_json(msg='Must supply a source', changed=False)

    if source.startswith(GCS_URI):
        # source is already an https Google Cloud Storage URI.
        volume = source
    elif source.startswith('gs://'):
        # libcloud only accepts https URIs, so rewrite the gs:// scheme.
        # Only the leading scheme is rewritten: str.replace() would also
        # mangle any later 'gs://' occurrence inside the object path.
        volume = GCS_URI + source[len('gs://'):]
    else:
        # Otherwise treat the source as the name of an attached disk.
        try:
            volume = gce.ex_get_volume(source, zone)
        except ResourceNotFoundError:
            module.fail_json(msg='Disk %s not found in zone %s' % (source, zone),
                             changed=False)
        except GoogleBaseError as e:
            module.fail_json(msg=str(e), changed=False)

    gce_extra_args = {}
    if family is not None:
        gce_extra_args['family'] = family

    # Image creation can be slow; apply the user-supplied timeout for the
    # duration of this call only, restoring the previous value afterwards.
    old_timeout = gce.connection.timeout
    try:
        gce.connection.timeout = timeout
        gce.ex_create_image(name, volume, desc, use_existing=False, **gce_extra_args)
        return True
    except ResourceExistsError:
        # use_existing=False turns a duplicate name into an error; report
        # "no change" instead of failing the run.
        return False
    except GoogleBaseError as e:
        module.fail_json(msg=str(e), changed=False)
    finally:
        gce.connection.timeout = old_timeout
+
+
def delete_image(gce, name, module):
    """Delete the named GCE image.

    Returns True when the image was deleted, False when no image by that
    name existed. Any other API failure aborts the module run.
    """
    try:
        gce.ex_delete_image(name)
    except ResourceNotFoundError:
        # Already gone: nothing to change.
        return False
    except GoogleBaseError as e:
        module.fail_json(msg=str(e), changed=False)
    else:
        return True
+
+
def main():
    """Entry point: create or delete a GCE image per the module arguments."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            family=dict(),
            description=dict(),
            source=dict(),
            state=dict(default='present', choices=['present', 'absent']),
            zone=dict(default='us-central1-a'),
            service_account_email=dict(),
            pem_file=dict(type='path'),
            project_id=dict(),
            timeout=dict(type='int', default=180)
        )
    )

    if not has_libcloud:
        module.fail_json(msg='libcloud with GCE support is required.')

    gce = gce_connect(module)

    name = module.params.get('name')
    state = module.params.get('state')
    family = module.params.get('family')
    changed = False

    # The 'family' option needs Apache Libcloud 1.0.0+. Compare parsed
    # versions: a plain string comparison is lexicographic and misorders
    # releases (e.g. '0.9.1' > '0.20.1' as strings).
    if family is not None and hasattr(libcloud, '__version__'):
        from distutils.version import LooseVersion
        if LooseVersion(libcloud.__version__) < LooseVersion('1.0.0'):
            module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'family' option",
                             changed=False)

    # user wants to create an image.
    if state == 'present':
        changed = create_image(gce, name, module)

    # user wants to delete the image.
    if state == 'absent':
        changed = delete_image(gce, name, module)

    module.exit_json(changed=changed, name=name)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/google/gce_tag.py b/lib/ansible/modules/cloud/google/gce_tag.py
new file mode 100644
index 0000000000..7122a2398a
--- /dev/null
+++ b/lib/ansible/modules/cloud/google/gce_tag.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: gce_tag
+version_added: "2.0"
+short_description: add or remove tag(s) to/from GCE instance
+description:
+ - This module can add or remove tags U(https://cloud.google.com/compute/docs/instances/#tags)
+ to/from GCE instance.
+options:
+ instance_name:
+ description:
+ - the name of the GCE instance to add/remove tags
+ required: true
+ default: null
+ aliases: []
+ tags:
+ description:
+ - comma-separated list of tags to add or remove
+ required: true
+ default: null
+ aliases: []
+ state:
+ description:
+ - desired state of the tags
+ required: false
+ default: "present"
+ choices: ["present", "absent"]
+ aliases: []
+ zone:
+ description:
+      - the zone of the instance
+ required: false
+ default: "us-central1-a"
+ aliases: []
+ service_account_email:
+ description:
+ - service account email
+ required: false
+ default: null
+ aliases: []
+ pem_file:
+ description:
+ - path to the pem file associated with the service account email
+ required: false
+ default: null
+ aliases: []
+ project_id:
+ description:
+ - your GCE project ID
+ required: false
+ default: null
+ aliases: []
+
+requirements:
+ - "python >= 2.6"
+ - "apache-libcloud"
+author: "Do Hoang Khiem (dohoangkhiem@gmail.com)"
+'''
+
+EXAMPLES = '''
+# Add tags 'http-server', 'https-server', 'staging' to instance name 'staging-server' in zone us-central1-a.
+- gce_tag:
+ instance_name: staging-server
+ tags: http-server,https-server,staging
+ zone: us-central1-a
+ state: present
+
+# Remove tags 'foo', 'bar' from instance 'test-server' in default zone (us-central1-a)
+- gce_tag:
+ instance_name: test-server
+ tags: foo,bar
+ state: absent
+
+'''
+
+try:
+ from libcloud.compute.types import Provider
+ from libcloud.compute.providers import get_driver
+ from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
+ ResourceExistsError, ResourceNotFoundError, InvalidRequestError
+
+ _ = Provider.GCE
+ HAS_LIBCLOUD = True
+except ImportError:
+ HAS_LIBCLOUD = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.gce import gce_connect
+
+
def add_tags(gce, module, instance_name, tags):
    """Ensure the given tags are present on an instance.

    Returns a (changed, added_tags) tuple; added_tags is None when the
    instance already carried every requested tag.
    """
    zone = module.params.get('zone')

    if not instance_name:
        module.fail_json(msg='Must supply instance_name', changed=False)

    if not tags:
        module.fail_json(msg='Must supply tags', changed=False)

    # GCE stores tags lower-cased; normalise the request up front.
    wanted = [tag.lower() for tag in tags]

    try:
        node = gce.ex_get_node(instance_name, zone=zone)
    except ResourceNotFoundError:
        module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False)
    except GoogleBaseError as e:
        module.fail_json(msg=str(e), changed=False)

    current = node.extra['tags']
    added = []

    # Append each missing tag as we go so that duplicate requests are
    # only applied once.
    for tag in wanted:
        if tag not in current:
            current.append(tag)
            added.append(tag)

    if not added:
        return False, None

    try:
        gce.ex_set_node_tags(node, current)
        return True, added
    except (GoogleBaseError, InvalidRequestError) as e:
        module.fail_json(msg=str(e), changed=False)
+
+
def remove_tags(gce, module, instance_name, tags):
    """Ensure the given tags are absent from an instance.

    Returns a (changed, removed_tags) tuple; removed_tags is None when
    none of the requested tags were present on the instance.
    """
    zone = module.params.get('zone')

    if not instance_name:
        module.fail_json(msg='Must supply instance_name', changed=False)

    if not tags:
        module.fail_json(msg='Must supply tags', changed=False)

    # GCE stores tags lower-cased; normalise the request before comparing.
    unwanted = [tag.lower() for tag in tags]

    try:
        node = gce.ex_get_node(instance_name, zone=zone)
    except ResourceNotFoundError:
        module.fail_json(msg='Instance %s not found in zone %s' % (instance_name, zone), changed=False)
    except GoogleBaseError as e:
        module.fail_json(msg=str(e), changed=False)

    current = node.extra['tags']
    removed = []

    for tag in unwanted:
        if tag in current:
            current.remove(tag)
            removed.append(tag)

    if not removed:
        return False, None

    try:
        gce.ex_set_node_tags(node, current)
        return True, removed
    except (GoogleBaseError, InvalidRequestError) as e:
        module.fail_json(msg=str(e), changed=False)
+
+
def main():
    """Entry point: add or remove tags on a GCE instance."""
    module = AnsibleModule(
        argument_spec=dict(
            instance_name=dict(required=True),
            tags=dict(type='list'),
            state=dict(default='present', choices=['present', 'absent']),
            zone=dict(default='us-central1-a'),
            service_account_email=dict(),
            pem_file=dict(type='path'),
            project_id=dict(),
        )
    )

    if not HAS_LIBCLOUD:
        module.fail_json(msg='libcloud with GCE support is required.')

    instance_name = module.params.get('instance_name')
    state = module.params.get('state')
    tags = module.params.get('tags')
    zone = module.params.get('zone')
    changed = False

    # 'zone' and 'tags' are validated here rather than via the argument
    # spec so the failure result carries changed=False.
    if not zone:
        module.fail_json(msg='Must specify "zone"', changed=False)

    if not tags:
        module.fail_json(msg='Must specify "tags"', changed=False)

    gce = gce_connect(module)

    # add tags to instance.
    if state == 'present':
        changed, tags_changed = add_tags(gce, module, instance_name, tags)

    # remove tags from instance
    if state == 'absent':
        changed, tags_changed = remove_tags(gce, module, instance_name, tags)

    # tags_changed is the list of tags actually added/removed, or None
    # when no change was needed.
    module.exit_json(changed=changed, instance_name=instance_name, tags=tags_changed, zone=zone)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/lxc/__init__.py b/lib/ansible/modules/cloud/lxc/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/cloud/lxc/__init__.py
diff --git a/lib/ansible/modules/cloud/lxc/lxc_container.py b/lib/ansible/modules/cloud/lxc/lxc_container.py
new file mode 100644
index 0000000000..d3b6804ce5
--- /dev/null
+++ b/lib/ansible/modules/cloud/lxc/lxc_container.py
@@ -0,0 +1,1765 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Kevin Carter <kevin.carter@rackspace.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = """
+---
+module: lxc_container
+short_description: Manage LXC Containers
+version_added: 1.8.0
+description:
+ - Management of LXC containers
+author: "Kevin Carter (@cloudnull)"
+options:
+ name:
+ description:
+ - Name of a container.
+ required: true
+ backing_store:
+ choices:
+ - dir
+ - lvm
+ - loop
+ - btrfs
+ - overlayfs
+ - zfs
+ description:
+ - Backend storage type for the container.
+ required: false
+ default: dir
+ template:
+ description:
+ - Name of the template to use within an LXC create.
+ required: false
+ default: ubuntu
+ template_options:
+ description:
+ - Template options when building the container.
+ required: false
+ config:
+ description:
+ - Path to the LXC configuration file.
+ required: false
+ default: null
+ lv_name:
+ description:
+ - Name of the logical volume, defaults to the container name.
+ default: $CONTAINER_NAME
+ required: false
+ vg_name:
+ description:
+ - If Backend store is lvm, specify the name of the volume group.
+ default: lxc
+ required: false
+ thinpool:
+ description:
+ - Use LVM thin pool called TP.
+ required: false
+ fs_type:
+ description:
+ - Create fstype TYPE.
+ default: ext4
+ required: false
+ fs_size:
+ description:
+ - File system Size.
+ default: 5G
+ required: false
+ directory:
+ description:
+ - Place rootfs directory under DIR.
+ required: false
+ zfs_root:
+ description:
+ - Create zfs under given zfsroot.
+ required: false
+ container_command:
+ description:
+ - Run a command within a container.
+ required: false
+ lxc_path:
+ description:
+ - Place container under PATH
+ required: false
+ container_log:
+ choices:
+ - true
+ - false
+ description:
+ - Enable a container log for host actions to the container.
+ default: false
+ container_log_level:
+ choices:
+ - INFO
+ - ERROR
+ - DEBUG
+ description:
+ - Set the log level for a container where *container_log* was set.
+ required: false
+ default: INFO
+ clone_name:
+ version_added: "2.0"
+ description:
+ - Name of the new cloned server. This is only used when state is
+ clone.
+ required: false
+ default: false
+ clone_snapshot:
+ version_added: "2.0"
+ required: false
+ choices:
+ - true
+ - false
+ description:
+ - Create a snapshot a container when cloning. This is not supported
+ by all container storage backends. Enabling this may fail if the
+ backing store does not support snapshots.
+ default: false
+ archive:
+ choices:
+ - true
+ - false
+ description:
+ - Create an archive of a container. This will create a tarball of the
+ running container.
+ default: false
+ archive_path:
+ description:
+      - Path to save the archived container. If the path does not exist
+ the archive method will attempt to create it.
+ default: null
+ archive_compression:
+ choices:
+ - gzip
+ - bzip2
+ - none
+ description:
+ - Type of compression to use when creating an archive of a running
+ container.
+ default: gzip
+ state:
+ choices:
+ - started
+ - stopped
+ - restarted
+ - absent
+ - frozen
+ description:
+ - Define the state of a container. If you clone a container using
+      `clone_name` the newly cloned container is created in a stopped state.
+ The running container will be stopped while the clone operation is
+ happening and upon completion of the clone the original container
+ state will be restored.
+ required: false
+ default: started
+ container_config:
+ description:
+ - list of 'key=value' options to use when configuring a container.
+ required: false
+requirements:
+ - 'lxc >= 1.0 # OS package'
+ - 'python >= 2.6 # OS Package'
+ - 'lxc-python2 >= 0.1 # PIP Package from https://github.com/lxc/python2-lxc'
+notes:
+ - Containers must have a unique name. If you attempt to create a container
+ with a name that already exists in the users namespace the module will
+ simply return as "unchanged".
+ - The "container_command" can be used with any state except "absent". If
+ used with state "stopped" the container will be "started", the command
+ executed, and then the container "stopped" again. Likewise if the state
+ is "stopped" and the container does not exist it will be first created,
+ "started", the command executed, and then "stopped". If you use a "|"
+ in the variable you can use common script formatting within the variable
+    itself. The "container_command" option will always execute as BASH.
+ When using "container_command" a log file is created in the /tmp/ directory
+ which contains both stdout and stderr of any command executed.
+ - If "archive" is **true** the system will attempt to create a compressed
+ tarball of the running container. The "archive" option supports LVM backed
+ containers and will create a snapshot of the running container when
+ creating the archive.
+ - If your distro does not have a package for "python2-lxc", which is a
+ requirement for this module, it can be installed from source at
+ "https://github.com/lxc/python2-lxc" or installed via pip using the package
+ name lxc-python2.
+"""
+
+EXAMPLES = """
+- name: Create a started container
+ lxc_container:
+ name: test-container-started
+ container_log: true
+ template: ubuntu
+ state: started
+ template_options: --release trusty
+
+- name: Create a stopped container
+ lxc_container:
+ name: test-container-stopped
+ container_log: true
+ template: ubuntu
+ state: stopped
+ template_options: --release trusty
+
+- name: Create a frozen container
+ lxc_container:
+ name: test-container-frozen
+ container_log: true
+ template: ubuntu
+ state: frozen
+ template_options: --release trusty
+ container_command: |
+ echo 'hello world.' | tee /opt/started-frozen
+
+# Create filesystem container, configure it, and archive it, and start it.
+- name: Create filesystem container
+ lxc_container:
+ name: test-container-config
+ backing_store: dir
+ container_log: true
+ template: ubuntu
+ state: started
+ archive: true
+ archive_compression: none
+ container_config:
+ - "lxc.aa_profile=unconfined"
+ - "lxc.cgroup.devices.allow=a *:* rmw"
+ template_options: --release trusty
+
+# Create an lvm container, run a complex command in it, add additional
+# configuration to it, create an archive of it, and finally leave the container
+# in a frozen state. The container archive will be compressed using bzip2
+- name: Create a frozen lvm container
+ lxc_container:
+ name: test-container-lvm
+ container_log: true
+ template: ubuntu
+ state: frozen
+ backing_store: lvm
+ template_options: --release trusty
+ container_command: |
+ apt-get update
+ apt-get install -y vim lxc-dev
+ echo 'hello world.' | tee /opt/started
+ if [[ -f "/opt/started" ]]; then
+ echo 'hello world.' | tee /opt/found-started
+ fi
+ container_config:
+ - "lxc.aa_profile=unconfined"
+ - "lxc.cgroup.devices.allow=a *:* rmw"
+ archive: true
+ archive_compression: bzip2
+ register: lvm_container_info
+
+- name: Debug info on container "test-container-lvm"
+ debug:
+ var: lvm_container_info
+
+- name: Run a command in a container and ensure it is in a "stopped" state.
+ lxc_container:
+ name: test-container-started
+ state: stopped
+ container_command: |
+ echo 'hello world.' | tee /opt/stopped
+
+- name: Run a command in a container and ensure it is in a "frozen" state.
+ lxc_container:
+ name: test-container-stopped
+ state: frozen
+ container_command: |
+ echo 'hello world.' | tee /opt/frozen
+
+- name: Start a container
+ lxc_container:
+ name: test-container-stopped
+ state: started
+
+- name: Run a command in a container and then restart it
+ lxc_container:
+ name: test-container-started
+ state: restarted
+ container_command: |
+ echo 'hello world.' | tee /opt/restarted
+
+- name: Run a complex command within a "running" container
+ lxc_container:
+ name: test-container-started
+ container_command: |
+ apt-get update
+ apt-get install -y curl wget vim apache2
+ echo 'hello world.' | tee /opt/started
+ if [[ -f "/opt/started" ]]; then
+ echo 'hello world.' | tee /opt/found-started
+ fi
+
+# Create an archive of an existing container, save the archive to a defined
+# path and then destroy it.
+- name: Archive container
+ lxc_container:
+ name: test-container-started
+ state: absent
+ archive: true
+ archive_path: /opt/archives
+
+# Create a container using overlayfs, create an archive of it, create a
+# snapshot clone of the container and finally leave the container
+# in a frozen state. The container archive will be compressed using gzip.
+- name: Create an overlayfs container archive and clone it
+ lxc_container:
+ name: test-container-overlayfs
+ container_log: true
+ template: ubuntu
+ state: started
+ backing_store: overlayfs
+ template_options: --release trusty
+ clone_snapshot: true
+ clone_name: test-container-overlayfs-clone-snapshot
+ archive: true
+ archive_compression: gzip
+ register: clone_container_info
+
+- name: debug info on container "test-container"
+ debug:
+ var: clone_container_info
+
+- name: Clone a container using snapshot
+ lxc_container:
+ name: test-container-overlayfs-clone-snapshot
+ backing_store: overlayfs
+ clone_name: test-container-overlayfs-clone-snapshot2
+ clone_snapshot: true
+
+- name: Create a new container and clone it
+ lxc_container:
+ name: test-container-new-archive
+ backing_store: dir
+ clone_name: test-container-new-archive-clone
+
+- name: Archive and clone a container then destroy it
+ lxc_container:
+ name: test-container-new-archive
+ state: absent
+ clone_name: test-container-new-archive-destroyed-clone
+ archive: true
+ archive_compression: gzip
+
+- name: Start a cloned container.
+ lxc_container:
+ name: test-container-new-archive-destroyed-clone
+ state: started
+
+- name: Destroy a container
+ lxc_container:
+ name: '{{ item }}'
+ state: absent
+ with_items:
+ - test-container-stopped
+ - test-container-started
+ - test-container-frozen
+ - test-container-lvm
+ - test-container-config
+ - test-container-overlayfs
+ - test-container-overlayfs-clone
+ - test-container-overlayfs-clone-snapshot
+ - test-container-overlayfs-clone-snapshot2
+ - test-container-new-archive
+ - test-container-new-archive-clone
+ - test-container-new-archive-destroyed-clone
+"""
+
+RETURN="""
+lxc_container:
+ description: container information
+ returned: success
+ type: list
+ contains:
+ name:
+ description: name of the lxc container
+ returned: success
+ type: string
+ sample: test_host
+ init_pid:
+ description: pid of the lxc init process
+ returned: success
+ type: int
+ sample: 19786
+ interfaces:
+ description: list of the container's network interfaces
+ returned: success
+ type: list
+ sample: [ "eth0", "lo" ]
+ ips:
+ description: list of ips
+ returned: success
+ type: list
+ sample: [ "10.0.3.3" ]
+ state:
+ description: resulting state of the container
+ returned: success
+ type: string
+ sample: "running"
+ archive:
+ description: resulting state of the container
+ returned: success, when archive is true
+ type: string
+ sample: "/tmp/test-container-config.tar"
+ clone:
+ description: if the container was cloned
+ returned: success, when clone_name is specified
+ type: boolean
+ sample: True
+"""
+
+import re
+
+try:
+ import lxc
+except ImportError:
+ HAS_LXC = False
+else:
+ HAS_LXC = True
+
+
# LXC_COMPRESSION_MAP is a map of available compression types when creating
# an archive of a container. Each entry supplies the archive file extension
# and the matching tar compression flag.
LXC_COMPRESSION_MAP = {
    'gzip': {
        # NOTE(review): 'tar.tgz' is an unusual extension (typically
        # 'tar.gz' or 'tgz') -- confirm downstream consumers expect it.
        'extension': 'tar.tgz',
        'argument': '-czf'
    },
    'bzip2': {
        'extension': 'tar.bz2',
        'argument': '-cjf'
    },
    'none': {
        'extension': 'tar',
        'argument': '-cf'
    }
}


# LXC_COMMAND_MAP is a map of variables that are available to a method based
# on the state the container is in. Each 'variables' dict maps a module
# parameter name to the lxc command-line flag it feeds.
LXC_COMMAND_MAP = {
    'create': {
        'variables': {
            'config': '--config',
            'template': '--template',
            'backing_store': '--bdev',
            'lxc_path': '--lxcpath',
            'lv_name': '--lvname',
            'vg_name': '--vgname',
            'thinpool': '--thinpool',
            'fs_type': '--fstype',
            'fs_size': '--fssize',
            'directory': '--dir',
            'zfs_root': '--zfsroot'
        }
    },
    'clone': {
        'variables': {
            'backing_store': '--backingstore',
            'lxc_path': '--lxcpath',
            'fs_size': '--fssize',
            'name': '--orig',
            'clone_name': '--new'
        }
    }
}


# LXC_BACKING_STORE is a map of available storage backends and options that
# are incompatible with the given storage backend.
LXC_BACKING_STORE = {
    'dir': [
        'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
    ],
    'lvm': [
        'zfs_root'
    ],
    'btrfs': [
        'lv_name', 'vg_name', 'thinpool', 'zfs_root', 'fs_type', 'fs_size'
    ],
    'loop': [
        'lv_name', 'vg_name', 'thinpool', 'zfs_root'
    ],
    'overlayfs': [
        'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool', 'zfs_root'
    ],
    'zfs': [
        'lv_name', 'vg_name', 'fs_type', 'fs_size', 'thinpool'
    ]
}


# LXC_LOGGING_LEVELS is a map of available log levels, keyed by the
# canonical name with the accepted user-supplied spellings as values.
LXC_LOGGING_LEVELS = {
    'INFO': ['info', 'INFO', 'Info'],
    'ERROR': ['error', 'ERROR', 'Error'],
    'DEBUG': ['debug', 'DEBUG', 'Debug']
}


# LXC_ANSIBLE_STATES is a map of states that contain values of methods used
# when a particular state is evoked.
LXC_ANSIBLE_STATES = {
    'started': '_started',
    'stopped': '_stopped',
    'restarted': '_restarted',
    'absent': '_destroyed',
    'frozen': '_frozen',
    'clone': '_clone'
}


# This is used to attach to a running container and execute commands from
# within the container on the host. This will provide local access to a
# container without using SSH. The template will attempt to work within the
# home directory of the user that was attached to the container and source
# that users environment variables by default.
ATTACH_TEMPLATE = """#!/usr/bin/env bash
pushd "$(getent passwd $(whoami)|cut -f6 -d':')"
    if [[ -f ".bashrc" ]];then
        source .bashrc
    fi
popd

# User defined command
%(container_command)s
"""
+
+
def create_script(command):
    """Write out a script onto a target and execute it.

    This method should be backward compatible with Python 2.4+ when executing
    from within the container.

    The rendered ATTACH_TEMPLATE is written to a private temp file, made
    executable, run with its stdout/stderr captured in temp log files, and
    removed once execution completes.

    :param command: command to run, this can be a script and can use spacing
                    with newlines as separation.
    :type command: ``str``
    """

    # Imports are kept local because this function is shipped into the
    # container via lxc's attach_wait and must be self-contained.
    import os
    import subprocess
    import tempfile

    (fd, script_file) = tempfile.mkstemp(prefix='lxc-attach-script')
    # Open in text mode: the rendered template is a native `str`, and
    # writing a `str` to a binary-mode file raises TypeError on Python 3.
    f = os.fdopen(fd, 'w')
    try:
        f.write(ATTACH_TEMPLATE % {'container_command': command})
        f.flush()
    finally:
        f.close()

    # Ensure the script is executable.
    # int('0700', 8) is used instead of 0o700 to stay parsable on Python 2.4.
    os.chmod(script_file, int('0700', 8))

    # Output log file.
    stdout_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-log')[0], 'ab')

    # Error log file.
    stderr_file = os.fdopen(tempfile.mkstemp(prefix='lxc-attach-script-err')[0], 'ab')

    # Execute the script command.
    try:
        subprocess.Popen(
            [script_file],
            stdout=stdout_file,
            stderr=stderr_file
        ).communicate()
    finally:
        # Close the log files.
        stderr_file.close()
        stdout_file.close()

        # Remove the script file upon completion of execution.
        os.remove(script_file)
+
+
+class LxcContainerManagement(object):
    def __init__(self, module):
        """Management of LXC containers via Ansible.

        :param module: Processed Ansible Module.
        :type module: ``object``
        """
        self.module = module
        # Requested target state; one of LXC_ANSIBLE_STATES' keys.
        self.state = self.module.params.get('state', None)
        # Flipped to True by any operation that modifies the container.
        self.state_change = False
        # NOTE(review): lxc_vg appears never to be read in this module; the
        # VG name is resolved on demand via _get_lxc_vg() — confirm.
        self.lxc_vg = None
        # Optional alternate lxcpath where containers live.
        self.lxc_path = self.module.params.get('lxc_path', None)
        self.container_name = self.module.params['name']
        # Bound lxc.Container handle for `name`.
        self.container = self.get_container_bind()
        # Populated by _check_archive / _check_clone for the module result.
        self.archive_info = None
        self.clone_info = None
+
    def get_container_bind(self):
        """Return a fresh ``lxc.Container`` handle for ``self.container_name``.

        NOTE(review): ``self.lxc_path`` is not passed here, so a container
        living in a non-default lxcpath may not be bound correctly — confirm.
        """
        return lxc.Container(name=self.container_name)
+
+ @staticmethod
+ def _roundup(num):
+ """Return a rounded floating point number.
+
+ :param num: Number to round up.
+ :type: ``float``
+ :returns: Rounded up number.
+ :rtype: ``int``
+ """
+ num, part = str(num).split('.')
+ num = int(num)
+ if int(part) != 0:
+ num += 1
+ return num
+
+ @staticmethod
+ def _container_exists(container_name, lxc_path=None):
+ """Check if a container exists.
+
+ :param container_name: Name of the container.
+ :type: ``str``
+ :returns: True or False if the container is found.
+ :rtype: ``bol``
+ """
+ if [i for i in lxc.list_containers(config_path=lxc_path) if i == container_name]:
+ return True
+ else:
+ return False
+
+ @staticmethod
+ def _add_variables(variables_dict, build_command):
+ """Return a command list with all found options.
+
+ :param variables_dict: Pre-parsed optional variables used from a
+ seed command.
+ :type variables_dict: ``dict``
+ :param build_command: Command to run.
+ :type build_command: ``list``
+ :returns: list of command options.
+ :rtype: ``list``
+ """
+
+ for key, value in variables_dict.items():
+ build_command.append(
+ '%s %s' % (key, value)
+ )
+ else:
+ return build_command
+
+ def _get_vars(self, variables):
+ """Return a dict of all variables as found within the module.
+
+ :param variables: Hash of all variables to find.
+ :type variables: ``dict``
+ """
+
+ # Remove incompatible storage backend options.
+ variables = variables.copy()
+ for v in LXC_BACKING_STORE[self.module.params['backing_store']]:
+ variables.pop(v, None)
+
+ return_dict = dict()
+ false_values = [None, ''] + BOOLEANS_FALSE
+ for k, v in variables.items():
+ _var = self.module.params.get(k)
+ if _var not in false_values:
+ return_dict[v] = _var
+ else:
+ return return_dict
+
+ def _run_command(self, build_command, unsafe_shell=False):
+ """Return information from running an Ansible Command.
+
+ This will squash the build command list into a string and then
+ execute the command via Ansible. The output is returned to the method.
+ This output is returned as `return_code`, `stdout`, `stderr`.
+
+ :param build_command: Used for the command and all options.
+ :type build_command: ``list``
+ :param unsafe_shell: Enable or Disable unsafe sell commands.
+ :type unsafe_shell: ``bol``
+ """
+
+ return self.module.run_command(
+ ' '.join(build_command),
+ use_unsafe_shell=unsafe_shell
+ )
+
    def _config(self):
        """Configure an LXC container.

        Write new configuration values to the lxc config file. This will
        stop the container if it's running write the new options and then
        restart the container upon completion.
        """

        # Nothing to do when no `container_config` option was supplied.
        _container_config = self.module.params.get('container_config')
        if not _container_config:
            return False

        # NOTE(review): binary-mode readlines() returns bytes on Python 3,
        # which would break the string comparisons below — confirm this
        # module only targets Python 2.
        container_config_file = self.container.config_file_name
        with open(container_config_file, 'rb') as f:
            container_config = f.readlines()

        # Note used ast literal_eval because AnsibleModule does not provide for
        # adequate dictionary parsing.
        # Issue: https://github.com/ansible/ansible/issues/7679
        # TODO(cloudnull) adjust import when issue has been resolved.
        import ast
        options_dict = ast.literal_eval(_container_config)
        # Each entry is expected to be a "key = value" string; split on the
        # first '=' only so values may themselves contain '='.
        parsed_options = [i.split('=', 1) for i in options_dict]

        config_change = False
        for key, value in parsed_options:
            key = key.strip()
            value = value.strip()
            new_entry = '%s = %s\n' % (key, value)
            # Match "key =" allowing optional whitespace before the '='.
            keyre = re.compile(r'%s(\s+)?=' % key)
            for option_line in container_config:
                # Look for key in config
                if keyre.match(option_line):
                    _, _value = option_line.split('=', 1)
                    # Collapse internal whitespace before comparing values.
                    config_value = ' '.join(_value.split())
                    # NOTE(review): index() returns the FIRST matching line;
                    # duplicate config lines could misplace the insert.
                    line_index = container_config.index(option_line)
                    # If the sanitized values don't match replace them
                    if value != config_value:
                        line_index += 1
                        if new_entry not in container_config:
                            config_change = True
                            container_config.insert(line_index, new_entry)
                    # Break the flow as values are written or not at this point
                    break
            else:
                # Key was not present anywhere in the config: append it.
                config_change = True
                container_config.append(new_entry)

        # If the config changed restart the container.
        if config_change:
            container_state = self._get_state()
            if container_state != 'stopped':
                self.container.stop()

            with open(container_config_file, 'wb') as f:
                f.writelines(container_config)

            self.state_change = True
            # Return the container to its prior run state; a previously
            # frozen container is started and then re-frozen.
            if container_state == 'running':
                self._container_startup()
            elif container_state == 'frozen':
                self._container_startup()
                self.container.freeze()
+
    def _container_create_clone(self):
        """Clone a new LXC container from an existing container.

        This method will clone an existing container to a new container using
        the `clone_name` variable as the new container name. The method will
        create a container if the container `name` does not exist.

        Note that cloning a container will ensure that the original container
        is "stopped" before the clone can be done. Because this operation can
        require a state change the method will return the original container
        to its prior state upon completion of the clone.

        Once the clone is complete the new container will be left in a stopped
        state.
        """

        # Ensure that the state of the original container is stopped
        container_state = self._get_state()
        if container_state != 'stopped':
            self.state_change = True
            self.container.stop()

        build_command = [
            self.module.get_bin_path('lxc-clone', True),
        ]

        # Append the clone-specific options (--orig, --new, etc.) that were
        # set on the module (see LXC_COMMAND_MAP['clone']).
        build_command = self._add_variables(
            variables_dict=self._get_vars(
                variables=LXC_COMMAND_MAP['clone']['variables']
            ),
            build_command=build_command
        )

        # Load logging for the instance when creating it.
        if self.module.params.get('clone_snapshot') in BOOLEANS_TRUE:
            build_command.append('--snapshot')
        # Check for backing_store == overlayfs if so force the use of snapshot
        # If overlay fs is used and snapshot is unset the clone command will
        # fail with an unsupported type.
        elif self.module.params.get('backing_store') == 'overlayfs':
            build_command.append('--snapshot')

        rc, return_data, err = self._run_command(build_command)
        if rc != 0:
            message = "Failed executing lxc-clone."
            self.failure(
                err=err, rc=rc, msg=message, command=' '.join(
                    build_command
                )
            )
        else:
            self.state_change = True
            # Restore the original state of the origin container if it was
            # not in a stopped state.
            if container_state == 'running':
                self.container.start()
            elif container_state == 'frozen':
                self.container.start()
                self.container.freeze()

            return True
+
    def _create(self):
        """Create a new LXC container.

        This method will build and execute a shell command to build the
        container. It would have been nice to simply use the lxc python
        library however at the time this was written the python library, in
        both py2 and py3 didn't support some of the more advanced container
        create processes. These missing processes mainly revolve around
        backing LXC containers with block devices.
        """

        build_command = [
            self.module.get_bin_path('lxc-create', True),
            '--name %s' % self.container_name,
            '--quiet'
        ]

        # Append create-specific options (--template, --bdev, etc.) that
        # were set on the module (see LXC_COMMAND_MAP['create']).
        build_command = self._add_variables(
            variables_dict=self._get_vars(
                variables=LXC_COMMAND_MAP['create']['variables']
            ),
            build_command=build_command
        )

        # Load logging for the instance when creating it.
        if self.module.params.get('container_log') in BOOLEANS_TRUE:
            # Set the logging path to the /var/log/lxc if uid is root. else
            # set it to the home folder of the user executing.
            try:
                if os.getuid() != 0:
                    log_path = os.getenv('HOME')
                else:
                    if not os.path.isdir('/var/log/lxc/'):
                        os.makedirs('/var/log/lxc/')
                    log_path = '/var/log/lxc/'
            except OSError:
                # Fall back to the user's home dir when /var/log/lxc cannot
                # be created.
                log_path = os.getenv('HOME')

            build_command.extend([
                '--logfile %s' % os.path.join(
                    log_path, 'lxc-%s.log' % self.container_name
                ),
                '--logpriority %s' % self.module.params.get(
                    'container_log_level'
                ).upper()
            ])

        # Add the template commands to the end of the command if there are any
        template_options = self.module.params.get('template_options', None)
        if template_options:
            build_command.append('-- %s' % template_options)

        rc, return_data, err = self._run_command(build_command)
        if rc != 0:
            message = "Failed executing lxc-create."
            self.failure(
                err=err, rc=rc, msg=message, command=' '.join(build_command)
            )
        else:
            self.state_change = True
+
+ def _container_data(self):
+ """Returns a dict of container information.
+
+ :returns: container data
+ :rtype: ``dict``
+ """
+
+ return {
+ 'interfaces': self.container.get_interfaces(),
+ 'ips': self.container.get_ips(),
+ 'state': self._get_state(),
+ 'init_pid': int(self.container.init_pid),
+ 'name' : self.container_name,
+ }
+
+ def _unfreeze(self):
+ """Unfreeze a container.
+
+ :returns: True or False based on if the container was unfrozen.
+ :rtype: ``bol``
+ """
+
+ unfreeze = self.container.unfreeze()
+ if unfreeze:
+ self.state_change = True
+ return unfreeze
+
+ def _get_state(self):
+ """Return the state of a container.
+
+ If the container is not found the state returned is "absent"
+
+ :returns: state of a container as a lower case string.
+ :rtype: ``str``
+ """
+
+ if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
+ return str(self.container.state).lower()
+ else:
+ return str('absent')
+
+ def _execute_command(self):
+ """Execute a shell command."""
+
+ container_command = self.module.params.get('container_command')
+ if container_command:
+ container_state = self._get_state()
+ if container_state == 'frozen':
+ self._unfreeze()
+ elif container_state == 'stopped':
+ self._container_startup()
+
+ self.container.attach_wait(create_script, container_command)
+ self.state_change = True
+
    def _container_startup(self, timeout=60):
        """Ensure a container is started.

        :param timeout: Time (in seconds) before the startup attempt is
                        abandoned; one start attempt is made per second.
        :type timeout: ``int``
        """

        # Re-bind in case the container was (re)created since __init__.
        self.container = self.get_container_bind()
        for _ in xrange(timeout):
            if self._get_state() != 'running':
                self.container.start()
                self.state_change = True
                # post startup sleep for 1 second.
                time.sleep(1)
            else:
                return True
        else:
            # for/else: the loop exhausted `timeout` attempts without the
            # container ever reaching the running state.
            self.failure(
                lxc_container=self._container_data(),
                error='Failed to start container'
                      ' [ %s ]' % self.container_name,
                rc=1,
                msg='The container [ %s ] failed to start. Check to lxc is'
                    ' available and that the container is in a functional'
                    ' state.' % self.container_name
            )
+
+ def _check_archive(self):
+ """Create a compressed archive of a container.
+
+ This will store archive_info in as self.archive_info
+ """
+
+ if self.module.params.get('archive') in BOOLEANS_TRUE:
+ self.archive_info = {
+ 'archive': self._container_create_tar()
+ }
+
+ def _check_clone(self):
+ """Create a compressed archive of a container.
+
+ This will store archive_info in as self.archive_info
+ """
+
+ clone_name = self.module.params.get('clone_name')
+ if clone_name:
+ if not self._container_exists(container_name=clone_name, lxc_path=self.lxc_path):
+ self.clone_info = {
+ 'cloned': self._container_create_clone()
+ }
+ else:
+ self.clone_info = {
+ 'cloned': False
+ }
+
    def _destroyed(self, timeout=60):
        """Ensure a container is destroyed.

        :param timeout: Time (in seconds) before the destroy operation is
                        abandoned; one attempt is made per second.
        :type timeout: ``int``
        """

        for _ in xrange(timeout):
            # Success: the container is gone.
            if not self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
                break

            # Check if the container needs to have an archive created.
            self._check_archive()

            # Check if the container is to be cloned
            self._check_clone()

            # A container must be stopped before it can be destroyed.
            if self._get_state() != 'stopped':
                self.state_change = True
                self.container.stop()

            if self.container.destroy():
                self.state_change = True

            # post destroy attempt sleep for 1 second.
            time.sleep(1)
        else:
            # for/else: the container still existed after `timeout` attempts.
            self.failure(
                lxc_container=self._container_data(),
                error='Failed to destroy container'
                      ' [ %s ]' % self.container_name,
                rc=1,
                msg='The container [ %s ] failed to be destroyed. Check'
                    ' that lxc is available and that the container is in a'
                    ' functional state.' % self.container_name
            )
+
    def _frozen(self, count=0):
        """Ensure a container is frozen.

        If the container does not exist the container will be created and
        this method calls itself once more; check_count() aborts if that
        retry also fails to produce a container.

        :param count: number of times this command has been called by itself.
        :type count: ``int``
        """

        self.check_count(count=count, method='frozen')
        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
            self._execute_command()

            # Perform any configuration updates
            self._config()

            container_state = self._get_state()
            if container_state == 'frozen':
                pass
            elif container_state == 'running':
                self.container.freeze()
                self.state_change = True
            else:
                # Stopped (or other): start first, then freeze.
                self._container_startup()
                self.container.freeze()
                self.state_change = True

            # Check if the container needs to have an archive created.
            self._check_archive()

            # Check if the container is to be cloned
            self._check_clone()
        else:
            # Create the missing container, then retry exactly once.
            self._create()
            count += 1
            self._frozen(count)
+
    def _restarted(self, count=0):
        """Ensure a container is restarted.

        If the container does not exist the container will be created and
        this method calls itself once more; check_count() aborts if that
        retry also fails to produce a container.

        :param count: number of times this command has been called by itself.
        :type count: ``int``
        """

        self.check_count(count=count, method='restart')
        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
            self._execute_command()

            # Perform any configuration updates
            self._config()

            # Stop first so the subsequent startup is a true restart.
            if self._get_state() != 'stopped':
                self.container.stop()
                self.state_change = True

            # Run container startup
            self._container_startup()

            # Check if the container needs to have an archive created.
            self._check_archive()

            # Check if the container is to be cloned
            self._check_clone()
        else:
            # Create the missing container, then retry exactly once.
            self._create()
            count += 1
            self._restarted(count)
+
    def _stopped(self, count=0):
        """Ensure a container is stopped.

        If the container does not exist the container will be created and
        this method calls itself once more; check_count() aborts if that
        retry also fails to produce a container.

        :param count: number of times this command has been called by itself.
        :type count: ``int``
        """

        self.check_count(count=count, method='stop')
        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
            self._execute_command()

            # Perform any configuration updates
            self._config()

            if self._get_state() != 'stopped':
                self.container.stop()
                self.state_change = True

            # Check if the container needs to have an archive created.
            self._check_archive()

            # Check if the container is to be cloned
            self._check_clone()
        else:
            # Create the missing container, then retry exactly once.
            self._create()
            count += 1
            self._stopped(count)
+
    def _started(self, count=0):
        """Ensure a container is started.

        If the container does not exist the container will be created and
        this method calls itself once more; check_count() aborts if that
        retry also fails to produce a container.

        :param count: number of times this command has been called by itself.
        :type count: ``int``
        """

        self.check_count(count=count, method='start')
        if self._container_exists(container_name=self.container_name, lxc_path=self.lxc_path):
            container_state = self._get_state()
            if container_state == 'running':
                pass
            elif container_state == 'frozen':
                self._unfreeze()
            elif not self._container_startup():
                self.failure(
                    lxc_container=self._container_data(),
                    error='Failed to start container'
                          ' [ %s ]' % self.container_name,
                    rc=1,
                    msg='The container [ %s ] failed to start. Check to lxc is'
                        ' available and that the container is in a functional'
                        ' state.' % self.container_name
                )

            # Return data
            self._execute_command()

            # Perform any configuration updates
            self._config()

            # Check if the container needs to have an archive created.
            self._check_archive()

            # Check if the container is to be cloned
            self._check_clone()
        else:
            # Create the missing container, then retry exactly once.
            self._create()
            count += 1
            self._started(count)
+
+ def _get_lxc_vg(self):
+ """Return the name of the Volume Group used in LXC."""
+
+ build_command = [
+ self.module.get_bin_path('lxc-config', True),
+ "lxc.bdev.lvm.vg"
+ ]
+ rc, vg, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to read LVM VG from LXC config',
+ command=' '.join(build_command)
+ )
+ else:
+ return str(vg.strip())
+
+ def _lvm_lv_list(self):
+ """Return a list of all lv in a current vg."""
+
+ vg = self._get_lxc_vg()
+ build_command = [
+ self.module.get_bin_path('lvs', True)
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to get list of LVs',
+ command=' '.join(build_command)
+ )
+
+ all_lvms = [i.split() for i in stdout.splitlines()][1:]
+ return [lv_entry[0] for lv_entry in all_lvms if lv_entry[1] == vg]
+
+ def _get_vg_free_pe(self, vg_name):
+ """Return the available size of a given VG.
+
+ :param vg_name: Name of volume.
+ :type vg_name: ``str``
+ :returns: size and measurement of an LV
+ :type: ``tuple``
+ """
+
+ build_command = [
+ 'vgdisplay',
+ vg_name,
+ '--units',
+ 'g'
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to read vg %s' % vg_name,
+ command=' '.join(build_command)
+ )
+
+ vg_info = [i.strip() for i in stdout.splitlines()][1:]
+ free_pe = [i for i in vg_info if i.startswith('Free')]
+ _free_pe = free_pe[0].split()
+ return float(_free_pe[-2]), _free_pe[-1]
+
+ def _get_lv_size(self, lv_name):
+ """Return the available size of a given LV.
+
+ :param lv_name: Name of volume.
+ :type lv_name: ``str``
+ :returns: size and measurement of an LV
+ :type: ``tuple``
+ """
+
+ vg = self._get_lxc_vg()
+ lv = os.path.join(vg, lv_name)
+ build_command = [
+ 'lvdisplay',
+ lv,
+ '--units',
+ 'g'
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to read lv %s' % lv,
+ command=' '.join(build_command)
+ )
+
+ lv_info = [i.strip() for i in stdout.splitlines()][1:]
+ _free_pe = [i for i in lv_info if i.startswith('LV Size')]
+ free_pe = _free_pe[0].split()
+ return self._roundup(float(free_pe[-2])), free_pe[-1]
+
+ def _lvm_snapshot_create(self, source_lv, snapshot_name,
+ snapshot_size_gb=5):
+ """Create an LVM snapshot.
+
+ :param source_lv: Name of lv to snapshot
+ :type source_lv: ``str``
+ :param snapshot_name: Name of lv snapshot
+ :type snapshot_name: ``str``
+ :param snapshot_size_gb: Size of snapshot to create
+ :type snapshot_size_gb: ``int``
+ """
+
+ vg = self._get_lxc_vg()
+ free_space, messurement = self._get_vg_free_pe(vg_name=vg)
+
+ if free_space < float(snapshot_size_gb):
+ message = (
+ 'Snapshot size [ %s ] is > greater than [ %s ] on volume group'
+ ' [ %s ]' % (snapshot_size_gb, free_space, vg)
+ )
+ self.failure(
+ error='Not enough space to create snapshot',
+ rc=2,
+ msg=message
+ )
+
+ # Create LVM Snapshot
+ build_command = [
+ self.module.get_bin_path('lvcreate', True),
+ "-n",
+ snapshot_name,
+ "-s",
+ os.path.join(vg, source_lv),
+ "-L%sg" % snapshot_size_gb
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to Create LVM snapshot %s/%s --> %s'
+ % (vg, source_lv, snapshot_name)
+ )
+
+ def _lvm_lv_mount(self, lv_name, mount_point):
+ """mount an lv.
+
+ :param lv_name: name of the logical volume to mount
+ :type lv_name: ``str``
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ vg = self._get_lxc_vg()
+
+ build_command = [
+ self.module.get_bin_path('mount', True),
+ "/dev/%s/%s" % (vg, lv_name),
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to mountlvm lv %s/%s to %s'
+ % (vg, lv_name, mount_point)
+ )
+
    def _create_tar(self, source_dir):
        """Create an archive of a given ``source_dir`` to ``output_path``.

        :param source_dir:  Path to the directory to be archived.
        :type source_dir: ``str``
        """

        # Restrict the archive to owner-only permissions while it is
        # being written; the previous umask is restored afterwards.
        old_umask = os.umask(int('0077',8))

        archive_path = self.module.params.get('archive_path')
        if not os.path.isdir(archive_path):
            os.makedirs(archive_path)

        archive_compression = self.module.params.get('archive_compression')
        compression_type = LXC_COMPRESSION_MAP[archive_compression]

        # Build "<archive_path>/<container_name>.<extension>" from the
        # selected compression type.
        archive_name = '%s.%s' % (
            os.path.join(
                archive_path,
                self.container_name
            ),
            compression_type['extension']
        )

        build_command = [
            self.module.get_bin_path('tar', True),
            '--directory=%s' % os.path.realpath(
                os.path.expanduser(source_dir)
            ),
            compression_type['argument'],
            archive_name,
            '.'
        ]

        rc, stdout, err = self._run_command(
            build_command=build_command,
            unsafe_shell=True
        )

        # Restore the caller's umask whether or not tar succeeded.
        os.umask(old_umask)

        if rc != 0:
            self.failure(
                err=err,
                rc=rc,
                msg='failed to create tar archive',
                command=' '.join(build_command)
            )

        return archive_name
+
+ def _lvm_lv_remove(self, lv_name):
+ """Remove an LV.
+
+ :param lv_name: The name of the logical volume
+ :type lv_name: ``str``
+ """
+
+ vg = self._get_lxc_vg()
+ build_command = [
+ self.module.get_bin_path('lvremove', True),
+ "-f",
+ "%s/%s" % (vg, lv_name),
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='Failed to remove LVM LV %s/%s' % (vg, lv_name),
+ command=' '.join(build_command)
+ )
+
    def _rsync_data(self, container_path, temp_dir):
        """Sync the container directory to the temp directory.

        :param container_path: path to the container rootfs (the value of
                               the ``lxc.rootfs`` config item).
        :type container_path: ``str``
        :param temp_dir: path to the temporary local working directory
        :type temp_dir: ``str``
        """
        # This loop is created to support overlayfs archives. This should
        # squash all of the layers into a single archive.
        fs_paths = container_path.split(':')
        if 'overlayfs' in fs_paths:
            fs_paths.pop(fs_paths.index('overlayfs'))

        for fs_path in fs_paths:
            # Set the path to the container data
            fs_path = os.path.dirname(fs_path)

            # Run the sync command
            build_command = [
                self.module.get_bin_path('rsync', True),
                '-aHAX',
                fs_path,
                temp_dir
            ]
            rc, stdout, err = self._run_command(
                build_command,
                unsafe_shell=True
            )
            if rc != 0:
                self.failure(
                    err=err,
                    rc=rc,
                    msg='failed to perform archive',
                    command=' '.join(build_command)
                )
+
+ def _unmount(self, mount_point):
+ """Unmount a file system.
+
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ build_command = [
+ self.module.get_bin_path('umount', True),
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to unmount [ %s ]' % mount_point,
+ command=' '.join(build_command)
+ )
+
+ def _overlayfs_mount(self, lowerdir, upperdir, mount_point):
+ """mount an lv.
+
+ :param lowerdir: name/path of the lower directory
+ :type lowerdir: ``str``
+ :param upperdir: name/path of the upper directory
+ :type upperdir: ``str``
+ :param mount_point: path on the file system that is mounted.
+ :type mount_point: ``str``
+ """
+
+ build_command = [
+ self.module.get_bin_path('mount', True),
+ '-t overlayfs',
+ '-o lowerdir=%s,upperdir=%s' % (lowerdir, upperdir),
+ 'overlayfs',
+ mount_point,
+ ]
+ rc, stdout, err = self._run_command(build_command)
+ if rc != 0:
+ self.failure(
+ err=err,
+ rc=rc,
+ msg='failed to mount overlayfs:%s:%s to %s -- Command: %s'
+ % (lowerdir, upperdir, mount_point, build_command)
+ )
+
    def _container_create_tar(self):
        """Create a tar archive from an LXC container.

        The process is as follows:
          * Stop or Freeze the container
          * Create temporary dir
          * Copy container and config to temporary directory
          * If LVM backed:
            * Create LVM snapshot of LV backing the container
            * Mount the snapshot to tmpdir/rootfs
          * Restore the state of the container
          * Create tar of tmpdir
          * Clean up

        :returns: path of the created archive.
        :rtype: ``str``
        """

        # Create a temp dir
        temp_dir = tempfile.mkdtemp()

        # Set the name of the working dir, temp + container_name
        work_dir = os.path.join(temp_dir, self.container_name)

        # LXC container rootfs
        lxc_rootfs = self.container.get_config_item('lxc.rootfs')

        # Test if the containers rootfs is a block device
        block_backed = lxc_rootfs.startswith(os.path.join(os.sep, 'dev'))

        # Test if the container is using overlayfs
        overlayfs_backed = lxc_rootfs.startswith('overlayfs')

        mount_point = os.path.join(work_dir, 'rootfs')

        # Set the snapshot name if needed
        snapshot_name = '%s_lxc_snapshot' % self.container_name

        # Remember the state so the container can be restored afterwards.
        container_state = self._get_state()
        try:
            # Ensure the original container is stopped or frozen
            if container_state not in ['stopped', 'frozen']:
                if container_state == 'running':
                    self.container.freeze()
                else:
                    self.container.stop()

            # Sync the container data from the container_path to work_dir
            self._rsync_data(lxc_rootfs, temp_dir)

            if block_backed:
                if snapshot_name not in self._lvm_lv_list():
                    if not os.path.exists(mount_point):
                        os.makedirs(mount_point)

                    # Take snapshot
                    size, measurement = self._get_lv_size(
                        lv_name=self.container_name
                    )
                    self._lvm_snapshot_create(
                        source_lv=self.container_name,
                        snapshot_name=snapshot_name,
                        snapshot_size_gb=size
                    )

                    # Mount snapshot
                    self._lvm_lv_mount(
                        lv_name=snapshot_name,
                        mount_point=mount_point
                    )
                else:
                    # A stale snapshot from an earlier run blocks creating a
                    # fresh consistent one; fail rather than archive it.
                    self.failure(
                        err='snapshot [ %s ] already exists' % snapshot_name,
                        rc=1,
                        msg='The snapshot [ %s ] already exists. Please clean'
                            ' up old snapshot of containers before continuing.'
                            % snapshot_name
                    )
            elif overlayfs_backed:
                # rootfs looks like "overlayfs:<lowerdir>:<upperdir>".
                lowerdir, upperdir = lxc_rootfs.split(':')[1:]
                self._overlayfs_mount(
                    lowerdir=lowerdir,
                    upperdir=upperdir,
                    mount_point=mount_point
                )

            # Set the state as changed and set a new fact
            self.state_change = True
            return self._create_tar(source_dir=work_dir)
        finally:
            if block_backed or overlayfs_backed:
                # unmount snapshot
                self._unmount(mount_point)

            if block_backed:
                # Remove snapshot
                self._lvm_lv_remove(snapshot_name)

            # Restore original state of container
            if container_state == 'running':
                if self._get_state() == 'frozen':
                    self.container.unfreeze()
                else:
                    self.container.start()

            # Remove tmpdir
            shutil.rmtree(temp_dir)
+
+ def check_count(self, count, method):
+ if count > 1:
+ self.failure(
+ error='Failed to %s container' % method,
+ rc=1,
+ msg='The container [ %s ] failed to %s. Check to lxc is'
+ ' available and that the container is in a functional'
+ ' state.' % (self.container_name, method)
+ )
+
    def failure(self, **kwargs):
        """Return a Failure when running an Ansible command.

        All keyword arguments are forwarded verbatim to
        ``AnsibleModule.fail_json``, which reports the failure and exits.

        :param error: ``str`` Error that occurred.
        :param rc: ``int`` Return code while executing an Ansible command.
        :param msg: ``str`` Message to report.
        """

        self.module.fail_json(**kwargs)
+
    def run(self):
        """Run the main method."""

        # Resolve the handler for the requested state (e.g. 'started' ->
        # self._started) and execute it.
        action = getattr(self, LXC_ANSIBLE_STATES[self.state])
        action()

        # NOTE(review): for state=absent this still queries the destroyed
        # container for interfaces/ips — confirm lxc returns empty data
        # rather than raising here.
        outcome = self._container_data()
        if self.archive_info:
            outcome.update(self.archive_info)

        if self.clone_info:
            outcome.update(self.clone_info)

        self.module.exit_json(
            changed=self.state_change,
            lxc_container=outcome
        )
+
+
def main():
    """Ansible Main module."""

    module = AnsibleModule(
        argument_spec=dict(
            name=dict(
                type='str',
                required=True
            ),
            template=dict(
                type='str',
                default='ubuntu'
            ),
            backing_store=dict(
                type='str',
                choices=LXC_BACKING_STORE.keys(),
                default='dir'
            ),
            template_options=dict(
                type='str'
            ),
            config=dict(
                type='path',
            ),
            vg_name=dict(
                type='str',
                default='lxc'
            ),
            thinpool=dict(
                type='str'
            ),
            fs_type=dict(
                type='str',
                default='ext4'
            ),
            fs_size=dict(
                type='str',
                default='5G'
            ),
            directory=dict(
                type='path'
            ),
            zfs_root=dict(
                type='str'
            ),
            lv_name=dict(
                type='str'
            ),
            lxc_path=dict(
                type='path'
            ),
            state=dict(
                choices=LXC_ANSIBLE_STATES.keys(),
                default='started'
            ),
            container_command=dict(
                type='str'
            ),
            container_config=dict(
                type='str'
            ),
            # The string 'false' defaults are coerced by type='bool'.
            container_log=dict(
                type='bool',
                default='false'
            ),
            container_log_level=dict(
                # Accept any spelling listed in LXC_LOGGING_LEVELS.
                choices=[n for i in LXC_LOGGING_LEVELS.values() for n in i],
                default='INFO'
            ),
            clone_name=dict(
                type='str',
                required=False
            ),
            clone_snapshot=dict(
                type='bool',
                default='false'
            ),
            archive=dict(
                type='bool',
                default='false'
            ),
            archive_path=dict(
                type='path',
            ),
            archive_compression=dict(
                choices=LXC_COMPRESSION_MAP.keys(),
                default='gzip'
            )
        ),
        supports_check_mode=False,
        # archive=True requires archive_path to be supplied.
        required_if = ([
            ('archive', True, ['archive_path'])
        ]),
    )

    if not HAS_LXC:
        module.fail_json(
            msg='The `lxc` module is not importable. Check the requirements.'
        )

    # Default the logical volume name to the container name when unset.
    lv_name = module.params.get('lv_name')
    if not lv_name:
        module.params['lv_name'] = module.params.get('name')

    lxc_manage = LxcContainerManagement(module=module)
    lxc_manage.run()
+
+
# import module bits
# NOTE: the wildcard import at the bottom of the file is the historical
# Ansible module convention; it supplies AnsibleModule, BOOLEANS_TRUE,
# BOOLEANS_FALSE and friends used throughout this module.
from ansible.module_utils.basic import *
if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/lxd/__init__.py b/lib/ansible/modules/cloud/lxd/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/cloud/lxd/__init__.py
diff --git a/lib/ansible/modules/cloud/lxd/lxd_container.py b/lib/ansible/modules/cloud/lxd/lxd_container.py
new file mode 100644
index 0000000000..b4eaa5739a
--- /dev/null
+++ b/lib/ansible/modules/cloud/lxd/lxd_container.py
@@ -0,0 +1,615 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: lxd_container
+short_description: Manage LXD Containers
+version_added: "2.2"
+description:
+ - Management of LXD containers
+author: "Hiroaki Nakamura (@hnakamur)"
+options:
+ name:
+ description:
+ - Name of a container.
+ required: true
+ architecture:
+ description:
+ - The architecture for the container (e.g. "x86_64" or "i686").
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
+ required: false
+ config:
+ description:
+ - 'The config for the container (e.g. {"limits.cpu": "2"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+ - If the container already exists and its "config" value in metadata
+ obtained from
+ GET /1.0/containers/<name>
+ U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#10containersname)
+ are different, then this module tries to apply the configurations.
+ - Keys starting with 'volatile.' are ignored for this comparison.
+ - Not all config values can be applied to an existing container;
+ you may need to delete and recreate the container.
+ required: false
+ devices:
+ description:
+ - 'The devices for the container
+ (e.g. { "rootfs": { "path": "/dev/kvm", "type": "unix-char" }).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+ required: false
+ ephemeral:
+ description:
+ - Whether or not the container is ephemeral (e.g. true or false).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)
+ required: false
+ source:
+ description:
+ - 'The source for the container
+ (e.g. { "type": "image",
+ "mode": "pull",
+ "server": "https://images.linuxcontainers.org",
+ "protocol": "lxd",
+ "alias": "ubuntu/xenial/amd64" }).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-1)'
+ required: false
+ state:
+ choices:
+ - started
+ - stopped
+ - restarted
+ - absent
+ - frozen
+ description:
+ - Define the state of a container.
+ required: false
+ default: started
+ timeout:
+ description:
+ - A timeout for changing the state of the container.
+ - This is also used as a timeout for waiting until IPv4 addresses
+ are set to the all network interfaces in the container after
+ starting or restarting.
+ required: false
+ default: 30
+ wait_for_ipv4_addresses:
+ description:
+ - If this is true, the M(lxd_container) waits until IPv4 addresses
+ are set to the all network interfaces in the container after
+ starting or restarting.
+ required: false
+ default: false
+ force_stop:
+ description:
+ - If this is true, the M(lxd_container) forces to stop the container
+ when it stops or restarts the container.
+ required: false
+ default: false
+ url:
+ description:
+ - The unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ key_file:
+ description:
+ - The client certificate key file path.
+ required: false
+ default: '"{}/.config/lxc/client.key" .format(os.environ["HOME"])'
+ cert_file:
+ description:
+ - The client certificate file path.
+ required: false
+ default: '"{}/.config/lxc/client.crt" .format(os.environ["HOME"])'
+ trust_password:
+ description:
+ - The client trusted password.
+ - You need to set this password on the LXD server before
+ running this module using the following command.
+ lxc config set core.trust_password <some random password>
+ See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
+ - If trust_password is set, this module send a request for
+ authentication before sending any requests.
+ required: false
+notes:
+ - Containers must have a unique name. If you attempt to create a container
+ with a name that already existed in the users namespace the module will
+ simply return as "unchanged".
+ - There are two ways to run commands in containers, using the command
+ module or using the ansible lxd connection plugin bundled in Ansible >=
+ 2.1, the latter requires python to be installed in the container which can
+ be done with the command module.
+ - You can copy a file from the host to the container
+ with the Ansible M(copy) and M(template) module and the `lxd` connection plugin.
+ See the example below.
+ - You can copy a file in the created container to the localhost
+ with `command=lxc file pull container_name/dir/filename filename`.
+ See the first example below.
+'''
+
+EXAMPLES = '''
+# An example for creating a Ubuntu container and install python
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a started container
+ lxd_container:
+ name: mycontainer
+ state: started
+ source:
+ type: image
+ mode: pull
+ server: https://images.linuxcontainers.org
+ protocol: lxd
+ alias: ubuntu/xenial/amd64
+ profiles: ["default"]
+ wait_for_ipv4_addresses: true
+ timeout: 600
+
+ - name: check python is installed in container
+ delegate_to: mycontainer
+ raw: dpkg -s python
+ register: python_install_check
+ failed_when: python_install_check.rc not in [0, 1]
+ changed_when: false
+
+ - name: install python in container
+ delegate_to: mycontainer
+ raw: apt-get install -y python
+ when: python_install_check.rc == 1
+
+# An example for deleting a container
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Delete a container
+ lxd_container:
+ name: mycontainer
+ state: absent
+
+# An example for restarting a container
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Restart a container
+ lxd_container:
+ name: mycontainer
+ state: restarted
+
+# An example for restarting a container using https to connect to the LXD server
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Restart a container
+ lxd_container:
+ url: https://127.0.0.1:8443
+ # These cert_file and key_file values are equal to the default values.
+ #cert_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+ #key_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+ trust_password: mypassword
+ name: mycontainer
+ state: restarted
+
+# Note your container must be in the inventory for the below example.
+#
+# [containers]
+# mycontainer ansible_connection=lxd
+#
+- hosts:
+ - mycontainer
+ tasks:
+ - name: copy /etc/hosts in the created container to localhost with name "mycontainer-hosts"
+ fetch:
+ src: /etc/hosts
+ dest: /tmp/mycontainer-hosts
+ flat: true
+'''
+
+RETURN='''
+addresses:
+ description: Mapping from the network device name to a list of IPv4 addresses in the container
+ returned: when state is started or restarted
+ type: object
+ sample: {"eth0": ["10.155.92.191"]}
+old_state:
+ description: The old state of the container
+ returned: when state is started or restarted
+ type: string
+ sample: "stopped"
+logs:
+ description: The logs of requests and responses.
+ returned: when ansible-playbook is invoked with -vvvv.
+ type: list
+ sample: "(too long to be placed here)"
+actions:
+ description: List of actions performed for the container.
+ returned: success
+ type: list
+ sample: '["create", "start"]'
+'''
+
+import datetime
+import os
+import time
+
+from ansible.module_utils.lxd import LXDClient, LXDClientException
+
# LXD_ANSIBLE_STATES is a map of states that contain values of methods used
# when a particular state is evoked.
LXD_ANSIBLE_STATES = {
    'started': '_started',
    'stopped': '_stopped',
    'restarted': '_restarted',
    'absent': '_destroyed',
    'frozen': '_frozen'
}

# ANSIBLE_LXD_STATES is a map of the "status" strings reported by the LXD API
# to the corresponding lxd_container module state parameter values.
ANSIBLE_LXD_STATES = {
    'Running': 'started',
    'Stopped': 'stopped',
    'Frozen': 'frozen',
}

# CONFIG_PARAMS is a list of config attribute names.
CONFIG_PARAMS = [
    'architecture', 'config', 'devices', 'ephemeral', 'profiles', 'source'
]
+
try:
    # Probe for the built-in all() (added in Python 2.5); raises NameError on
    # older interpreters, in which case we fall back to a local definition.
    callable(all)
except NameError:
    # For python <2.5
    # This definition is copied from https://docs.python.org/2/library/functions.html#all
    def all(iterable):
        for element in iterable:
            if not element:
                return False
        return True
+
class LXDContainerManagement(object):
    def __init__(self, module):
        """Management of LXD containers via Ansible.

        :param module: Processed Ansible Module.
        :type module: ``object``
        """
        self.module = module
        self.name = self.module.params['name']
        self._build_config()

        self.state = self.module.params['state']

        self.timeout = self.module.params['timeout']
        self.wait_for_ipv4_addresses = self.module.params['wait_for_ipv4_addresses']
        self.force_stop = self.module.params['force_stop']
        # Filled in by _get_addresses() when waiting for IPv4 addresses.
        self.addresses = None

        self.url = self.module.params['url']
        self.key_file = self.module.params.get('key_file', None)
        self.cert_file = self.module.params.get('cert_file', None)
        # -vvvv or higher: collect request/response logs for the result JSON.
        self.debug = self.module._verbosity >= 4
        try:
            self.client = LXDClient(
                self.url, key_file=self.key_file, cert_file=self.cert_file,
                debug=self.debug
            )
        except LXDClientException as e:
            self.module.fail_json(msg=e.msg)
        self.trust_password = self.module.params.get('trust_password', None)
        # Names of the operations performed, reported back to the caller.
        self.actions = []

    def _build_config(self):
        """Collect the CONFIG_PARAMS module parameters that were supplied."""
        self.config = {}
        for attr in CONFIG_PARAMS:
            param_val = self.module.params.get(attr, None)
            if param_val is not None:
                self.config[attr] = param_val

    def _get_container_json(self):
        """Return the GET /1.0/containers/<name> response (404 treated as ok)."""
        return self.client.do(
            'GET', '/1.0/containers/{0}'.format(self.name),
            ok_error_codes=[404]
        )

    def _get_container_state_json(self):
        """Return the GET /1.0/containers/<name>/state response (404 ok)."""
        return self.client.do(
            'GET', '/1.0/containers/{0}/state'.format(self.name),
            ok_error_codes=[404]
        )

    @staticmethod
    def _container_json_to_module_state(resp_json):
        """Map an LXD container GET response to an Ansible state string."""
        if resp_json['type'] == 'error':
            return 'absent'
        return ANSIBLE_LXD_STATES[resp_json['metadata']['status']]

    def _change_state(self, action, force_stop=False):
        """PUT a state-change *action* (start/stop/...) to the LXD server."""
        body_json = {'action': action, 'timeout': self.timeout}
        if force_stop:
            body_json['force'] = True
        return self.client.do('PUT', '/1.0/containers/{0}/state'.format(self.name), body_json=body_json)

    def _create_container(self):
        """POST a new container built from the supplied config parameters."""
        config = self.config.copy()
        config['name'] = self.name
        self.client.do('POST', '/1.0/containers', config)
        self.actions.append('create')

    def _start_container(self):
        self._change_state('start')
        self.actions.append('start')

    def _stop_container(self):
        self._change_state('stop', self.force_stop)
        self.actions.append('stop')

    def _restart_container(self):
        self._change_state('restart', self.force_stop)
        self.actions.append('restart')

    def _delete_container(self):
        self.client.do('DELETE', '/1.0/containers/{0}'.format(self.name))
        self.actions.append('delete')

    def _freeze_container(self):
        self._change_state('freeze')
        self.actions.append('freeze')

    def _unfreeze_container(self):
        self._change_state('unfreeze')
        # Fixed typo: was 'unfreez', which leaked into the user-visible
        # "actions" result list.
        self.actions.append('unfreeze')

    def _container_ipv4_addresses(self, ignore_devices=('lo',)):
        """Return {device: [ipv4, ...]} for the container, skipping *ignore_devices*.

        The default is a tuple (not a list) to avoid a mutable default argument.
        """
        resp_json = self._get_container_state_json()
        network = resp_json['metadata']['network'] or {}
        network = dict((k, v) for k, v in network.items() if k not in ignore_devices) or {}
        addresses = dict((k, [a['address'] for a in v['addresses'] if a['family'] == 'inet']) for k, v in network.items()) or {}
        return addresses

    @staticmethod
    def _has_all_ipv4_addresses(addresses):
        """True when at least one device is reported and every device has an IPv4 address."""
        # values() (not the Python-2-only itervalues()) keeps this working on
        # Python 3 and matches the .items() style used elsewhere in this class.
        return len(addresses) > 0 and all([len(v) > 0 for v in addresses.values()])

    def _get_addresses(self):
        """Poll until every interface has an IPv4 address or self.timeout expires.

        On success the result is stored in self.addresses; on expiry it is
        simply left as None (no exception is raised here).
        """
        try:
            due = datetime.datetime.now() + datetime.timedelta(seconds=self.timeout)
            while datetime.datetime.now() < due:
                time.sleep(1)
                addresses = self._container_ipv4_addresses()
                if self._has_all_ipv4_addresses(addresses):
                    self.addresses = addresses
                    return
        except LXDClientException as e:
            # Rewrite the message of any API error raised while polling.
            e.msg = 'timeout for getting IPv4 addresses'
            raise

    def _started(self):
        if self.old_state == 'absent':
            self._create_container()
            self._start_container()
        else:
            if self.old_state == 'frozen':
                self._unfreeze_container()
            elif self.old_state == 'stopped':
                self._start_container()
            if self._needs_to_apply_container_configs():
                self._apply_container_configs()
        if self.wait_for_ipv4_addresses:
            self._get_addresses()

    def _stopped(self):
        if self.old_state == 'absent':
            self._create_container()
        else:
            if self.old_state == 'stopped':
                # A stopped container must be started before new configs can
                # be applied, then stopped again.
                if self._needs_to_apply_container_configs():
                    self._start_container()
                    self._apply_container_configs()
                    self._stop_container()
            else:
                if self.old_state == 'frozen':
                    self._unfreeze_container()
                if self._needs_to_apply_container_configs():
                    self._apply_container_configs()
                self._stop_container()

    def _restarted(self):
        if self.old_state == 'absent':
            self._create_container()
            self._start_container()
        else:
            if self.old_state == 'frozen':
                self._unfreeze_container()
            if self._needs_to_apply_container_configs():
                self._apply_container_configs()
            self._restart_container()
        if self.wait_for_ipv4_addresses:
            self._get_addresses()

    def _destroyed(self):
        if self.old_state != 'absent':
            if self.old_state == 'frozen':
                self._unfreeze_container()
            if self.old_state != 'stopped':
                self._stop_container()
            self._delete_container()

    def _frozen(self):
        if self.old_state == 'absent':
            self._create_container()
            self._start_container()
            self._freeze_container()
        else:
            if self.old_state == 'stopped':
                self._start_container()
            if self._needs_to_apply_container_configs():
                self._apply_container_configs()
            self._freeze_container()

    def _needs_to_change_container_config(self, key):
        """True when *key* was supplied and differs from the server's value."""
        if key not in self.config:
            return False
        if key == 'config':
            # 'volatile.*' keys are server-managed and excluded from the diff.
            old_configs = dict((k, v) for k, v in self.old_container_json['metadata'][key].items() if not k.startswith('volatile.'))
        else:
            old_configs = self.old_container_json['metadata'][key]
        return self.config[key] != old_configs

    def _needs_to_apply_container_configs(self):
        return (
            self._needs_to_change_container_config('architecture') or
            self._needs_to_change_container_config('config') or
            self._needs_to_change_container_config('ephemeral') or
            self._needs_to_change_container_config('devices') or
            self._needs_to_change_container_config('profiles')
        )

    def _apply_container_configs(self):
        """PUT the merged (old + requested) configuration to the container."""
        old_metadata = self.old_container_json['metadata']
        body_json = {
            'architecture': old_metadata['architecture'],
            'config': old_metadata['config'],
            'devices': old_metadata['devices'],
            'profiles': old_metadata['profiles']
        }
        if self._needs_to_change_container_config('architecture'):
            body_json['architecture'] = self.config['architecture']
        if self._needs_to_change_container_config('config'):
            # Merge requested keys over the existing config rather than
            # replacing it wholesale, so server-managed keys are preserved.
            for k, v in self.config['config'].items():
                body_json['config'][k] = v
        if self._needs_to_change_container_config('ephemeral'):
            body_json['ephemeral'] = self.config['ephemeral']
        if self._needs_to_change_container_config('devices'):
            body_json['devices'] = self.config['devices']
        if self._needs_to_change_container_config('profiles'):
            body_json['profiles'] = self.config['profiles']
        self.client.do('PUT', '/1.0/containers/{0}'.format(self.name), body_json=body_json)
        self.actions.append('apply_container_configs')

    def run(self):
        """Run the main method."""

        try:
            if self.trust_password is not None:
                self.client.authenticate(self.trust_password)

            self.old_container_json = self._get_container_json()
            self.old_state = self._container_json_to_module_state(self.old_container_json)
            action = getattr(self, LXD_ANSIBLE_STATES[self.state])
            action()

            # changed == at least one operation was performed.
            state_changed = len(self.actions) > 0
            result_json = {
                'log_verbosity': self.module._verbosity,
                'changed': state_changed,
                'old_state': self.old_state,
                'actions': self.actions
            }
            if self.client.debug:
                result_json['logs'] = self.client.logs
            if self.addresses is not None:
                result_json['addresses'] = self.addresses
            self.module.exit_json(**result_json)
        except LXDClientException as e:
            state_changed = len(self.actions) > 0
            fail_params = {
                'msg': e.msg,
                'changed': state_changed,
                'actions': self.actions
            }
            if self.client.debug:
                fail_params['logs'] = e.kwargs['logs']
            self.module.fail_json(**fail_params)
+
def main():
    """Ansible Main module."""

    # Declare the module interface compactly; one line per parameter.
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            architecture=dict(type='str'),
            config=dict(type='dict'),
            description=dict(type='str'),
            devices=dict(type='dict'),
            ephemeral=dict(type='bool'),
            profiles=dict(type='list'),
            source=dict(type='dict'),
            state=dict(choices=LXD_ANSIBLE_STATES.keys(), default='started'),
            timeout=dict(type='int', default=30),
            wait_for_ipv4_addresses=dict(type='bool', default=False),
            force_stop=dict(type='bool', default=False),
            url=dict(type='str', default='unix:/var/lib/lxd/unix.socket'),
            key_file=dict(
                type='str',
                default='{}/.config/lxc/client.key'.format(os.environ['HOME'])
            ),
            cert_file=dict(
                type='str',
                default='{}/.config/lxc/client.crt'.format(os.environ['HOME'])
            ),
            trust_password=dict(type='str'),
        ),
        supports_check_mode=False,
    )

    # Hand the processed module straight to the manager and run it.
    LXDContainerManagement(module=module).run()
+
+# import module bits
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/lxd/lxd_profile.py b/lib/ansible/modules/cloud/lxd/lxd_profile.py
new file mode 100644
index 0000000000..546d0c09ea
--- /dev/null
+++ b/lib/ansible/modules/cloud/lxd/lxd_profile.py
@@ -0,0 +1,378 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Hiroaki Nakamura <hnakamur@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: lxd_profile
+short_description: Manage LXD profiles
+version_added: "2.2"
+description:
+ - Management of LXD profiles
+author: "Hiroaki Nakamura (@hnakamur)"
+options:
+ name:
+ description:
+ - Name of a profile.
+ required: true
+ config:
+ description:
+ - 'The config for the container (e.g. {"limits.memory": "4GB"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+ - If the profile already exists and its "config" value in metadata
+ obtained from
+ GET /1.0/profiles/<name>
+ U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#get-19)
+ are different, then this module tries to apply the configurations.
+ - Not all config values are supported to apply the existing profile.
+ Maybe you need to delete and recreate a profile.
+ required: false
+ devices:
+ description:
+ - 'The devices for the profile
+ (e.g. {"rootfs": {"path": "/dev/kvm", "type": "unix-char"}).
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#patch-3)'
+ required: false
+ new_name:
+ description:
+ - A new name of a profile.
+ - If this parameter is specified a profile will be renamed to this name.
+ See U(https://github.com/lxc/lxd/blob/master/doc/rest-api.md#post-11)
+ required: false
+ state:
+ choices:
+ - present
+ - absent
+ description:
+ - Define the state of a profile.
+ required: false
+ default: present
+ url:
+ description:
+ - The unix domain socket path or the https URL for the LXD server.
+ required: false
+ default: unix:/var/lib/lxd/unix.socket
+ key_file:
+ description:
+ - The client certificate key file path.
+ required: false
+ default: '"{}/.config/lxc/client.key" .format(os.environ["HOME"])'
+ cert_file:
+ description:
+ - The client certificate file path.
+ required: false
+ default: '"{}/.config/lxc/client.crt" .format(os.environ["HOME"])'
+ trust_password:
+ description:
+ - The client trusted password.
+ - You need to set this password on the LXD server before
+ running this module using the following command.
+ lxc config set core.trust_password <some random password>
+ See U(https://www.stgraber.org/2016/04/18/lxd-api-direct-interaction/)
+ - If trust_password is set, this module send a request for
+ authentication before sending any requests.
+ required: false
+notes:
+ - Profiles must have a unique name. If you attempt to create a profile
+ with a name that already existed in the users namespace the module will
+ simply return as "unchanged".
+'''
+
+EXAMPLES = '''
+# An example for creating a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Create a profile
+ lxd_profile:
+ name: macvlan
+ state: present
+ config: {}
+ description: my macvlan profile
+ devices:
+ eth0:
+ nictype: macvlan
+ parent: br0
+ type: nic
+
+# An example for creating a profile via http connection
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: create macvlan profile
+ lxd_profile:
+ url: https://127.0.0.1:8443
+ # These cert_file and key_file values are equal to the default values.
+ #cert_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.crt"
+ #key_file: "{{ lookup('env', 'HOME') }}/.config/lxc/client.key"
+ trust_password: mypassword
+ name: macvlan
+ state: present
+ config: {}
+ description: my macvlan profile
+ devices:
+ eth0:
+ nictype: macvlan
+ parent: br0
+ type: nic
+
+# An example for deleting a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Delete a profile
+ lxd_profile:
+ name: macvlan
+ state: absent
+
+# An example for renaming a profile
+- hosts: localhost
+ connection: local
+ tasks:
+ - name: Rename a profile
+ lxd_profile:
+ name: macvlan
+ new_name: macvlan2
+ state: present
+'''
+
+RETURN='''
+old_state:
+ description: The old state of the profile
+ returned: success
+ type: string
+ sample: "absent"
+logs:
+ description: The logs of requests and responses.
+ returned: when ansible-playbook is invoked with -vvvv.
+ type: list
+ sample: "(too long to be placed here)"
+actions:
+ description: List of actions performed for the profile.
+ returned: success
+ type: list
+ sample: '["create"]'
+'''
+
+import os
+from ansible.module_utils.lxd import LXDClient, LXDClientException
+
+# PROFILES_STATES is a list of the profile states supported by this module
+PROFILES_STATES = [
+ 'present', 'absent'
+]
+
+# CONFIG_PARAMS is a list of config attribute names.
+CONFIG_PARAMS = [
+ 'config', 'description', 'devices'
+]
+
class LXDProfileManagement(object):
    def __init__(self, module):
        """Management of LXD profiles via Ansible.

        :param module: Processed Ansible Module.
        :type module: ``object``
        """
        self.module = module
        self.name = self.module.params['name']
        self._build_config()
        self.state = self.module.params['state']
        self.new_name = self.module.params.get('new_name', None)

        self.url = self.module.params['url']
        self.key_file = self.module.params.get('key_file', None)
        self.cert_file = self.module.params.get('cert_file', None)
        # -vvvv or higher: collect request/response logs for the result JSON.
        self.debug = self.module._verbosity >= 4
        try:
            self.client = LXDClient(
                self.url, key_file=self.key_file, cert_file=self.cert_file,
                debug=self.debug
            )
        except LXDClientException as e:
            self.module.fail_json(msg=e.msg)
        self.trust_password = self.module.params.get('trust_password', None)
        # Names of the operations performed, reported back to the caller.
        self.actions = []

    def _build_config(self):
        """Collect the CONFIG_PARAMS module parameters that were supplied."""
        self.config = {}
        for attr in CONFIG_PARAMS:
            param_val = self.module.params.get(attr, None)
            if param_val is not None:
                self.config[attr] = param_val

    def _get_profile_json(self):
        """Return the GET /1.0/profiles/<name> response (404 treated as ok)."""
        return self.client.do(
            'GET', '/1.0/profiles/{0}'.format(self.name),
            ok_error_codes=[404]
        )

    @staticmethod
    def _profile_json_to_module_state(resp_json):
        """Map an LXD profile GET response to 'present' or 'absent'."""
        if resp_json['type'] == 'error':
            return 'absent'
        return 'present'

    def _update_profile(self):
        """Converge the profile to the requested state, recording actions."""
        if self.state == 'present':
            if self.old_state == 'absent':
                if self.new_name is None:
                    self._create_profile()
                else:
                    # Renaming a non-existent profile is contradictory input.
                    self.module.fail_json(
                        msg='new_name must not be set when the profile does not exist and the specified state is present',
                        changed=False)
            else:
                if self.new_name is not None and self.new_name != self.name:
                    self._rename_profile()
                if self._needs_to_apply_profile_configs():
                    self._apply_profile_configs()
        elif self.state == 'absent':
            if self.old_state == 'present':
                if self.new_name is None:
                    self._delete_profile()
                else:
                    # Renaming and deleting in one call is contradictory input.
                    self.module.fail_json(
                        msg='new_name must not be set when the profile exists and the specified state is absent',
                        changed=False)

    def _create_profile(self):
        config = self.config.copy()
        config['name'] = self.name
        self.client.do('POST', '/1.0/profiles', config)
        self.actions.append('create')

    def _rename_profile(self):
        config = {'name': self.new_name}
        # '{0}' placeholder for consistency with the lxd_container module.
        self.client.do('POST', '/1.0/profiles/{0}'.format(self.name), config)
        self.actions.append('rename')
        self.name = self.new_name

    def _needs_to_change_profile_config(self, key):
        """True when *key* was supplied and differs from the server's value."""
        if key not in self.config:
            return False
        old_configs = self.old_profile_json['metadata'].get(key, None)
        return self.config[key] != old_configs

    def _needs_to_apply_profile_configs(self):
        return (
            self._needs_to_change_profile_config('config') or
            self._needs_to_change_profile_config('description') or
            self._needs_to_change_profile_config('devices')
        )

    def _apply_profile_configs(self):
        # NOTE(review): this PUTs a copy of the whole GET response (including
        # its 'type'/'metadata' wrapper) with the user's values merged on top;
        # presumably the server ignores the extra keys -- confirm against the
        # LXD REST API before relying on it.
        config = self.old_profile_json.copy()
        # items() (not the Python-2-only iteritems()) keeps this working on
        # Python 3 and matches the style used by the lxd_container module.
        for k, v in self.config.items():
            config[k] = v
        self.client.do('PUT', '/1.0/profiles/{0}'.format(self.name), config)
        self.actions.append('apply_profile_configs')

    def _delete_profile(self):
        self.client.do('DELETE', '/1.0/profiles/{0}'.format(self.name))
        self.actions.append('delete')

    def run(self):
        """Run the main method."""

        try:
            if self.trust_password is not None:
                self.client.authenticate(self.trust_password)

            self.old_profile_json = self._get_profile_json()
            self.old_state = self._profile_json_to_module_state(self.old_profile_json)
            self._update_profile()

            # changed == at least one operation was performed.
            state_changed = len(self.actions) > 0
            result_json = {
                'changed': state_changed,
                'old_state': self.old_state,
                'actions': self.actions
            }
            if self.client.debug:
                result_json['logs'] = self.client.logs
            self.module.exit_json(**result_json)
        except LXDClientException as e:
            state_changed = len(self.actions) > 0
            fail_params = {
                'msg': e.msg,
                'changed': state_changed,
                'actions': self.actions
            }
            if self.client.debug:
                fail_params['logs'] = e.kwargs['logs']
            self.module.fail_json(**fail_params)
+
+
def main():
    """Ansible Main module."""

    # Declare the module interface compactly; one line per parameter.
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            new_name=dict(type='str'),
            config=dict(type='dict'),
            description=dict(type='str'),
            devices=dict(type='dict'),
            state=dict(choices=PROFILES_STATES, default='present'),
            url=dict(type='str', default='unix:/var/lib/lxd/unix.socket'),
            key_file=dict(
                type='str',
                default='{}/.config/lxc/client.key'.format(os.environ['HOME'])
            ),
            cert_file=dict(
                type='str',
                default='{}/.config/lxc/client.crt'.format(os.environ['HOME'])
            ),
            trust_password=dict(type='str'),
        ),
        supports_check_mode=False,
    )

    # Hand the processed module straight to the manager and run it.
    LXDProfileManagement(module=module).run()
+
+# import module bits
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/misc/__init__.py b/lib/ansible/modules/cloud/misc/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/cloud/misc/__init__.py
diff --git a/lib/ansible/modules/cloud/misc/ovirt.py b/lib/ansible/modules/cloud/misc/ovirt.py
new file mode 100644
index 0000000000..af89998258
--- /dev/null
+++ b/lib/ansible/modules/cloud/misc/ovirt.py
@@ -0,0 +1,527 @@
+#!/usr/bin/python
+
+# (c) 2013, Vincent Van der Kussen <vincent at vanderkussen.org>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt
+author: "Vincent Van der Kussen (@vincentvdk)"
+short_description: oVirt/RHEV platform management
+description:
+ - allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform
+version_added: "1.4"
+options:
+ user:
+ description:
+ - the user to authenticate with
+ default: null
+ required: true
+ aliases: []
+ url:
+ description:
+ - the url of the oVirt instance
+ default: null
+ required: true
+ aliases: []
+ instance_name:
+ description:
+ - the name of the instance to use
+ default: null
+ required: true
+ aliases: [ vmname ]
+ password:
+ description:
+ - password of the user to authenticate with
+ default: null
+ required: true
+ aliases: []
+ image:
+ description:
+ - template to use for the instance
+ default: null
+ required: false
+ aliases: []
+ resource_type:
+ description:
+ - whether you want to deploy an image or create an instance from scratch.
+ default: null
+ required: false
+ aliases: []
+ choices: [ 'new', 'template' ]
+ zone:
+ description:
+ - deploy the image to this oVirt cluster
+ default: null
+ required: false
+ aliases: []
+ instance_disksize:
+ description:
+ - size of the instance's disk in GB
+ default: null
+ required: false
+ aliases: [ vm_disksize]
+ instance_cpus:
+ description:
+ - the instance's number of cpu's
+ default: 1
+ required: false
+ aliases: [ vmcpus ]
+ instance_nic:
+ description:
+ - name of the network interface in oVirt/RHEV
+ default: null
+ required: false
+ aliases: [ vmnic ]
+ instance_network:
+ description:
+ - the logical network the machine should belong to
+ default: rhevm
+ required: false
+ aliases: [ vmnetwork ]
+ instance_mem:
+ description:
+ - the instance's amount of memory in MB
+ default: null
+ required: false
+ aliases: [ vmmem ]
+ instance_type:
+ description:
+ - define if the instance is a server or desktop
+ default: server
+ required: false
+ aliases: [ vmtype ]
+ choices: [ 'server', 'desktop' ]
+ disk_alloc:
+ description:
+ - define if disk is thin or preallocated
+ default: thin
+ required: false
+ aliases: []
+ choices: [ 'thin', 'preallocated' ]
+ disk_int:
+ description:
+ - interface type of the disk
+ default: virtio
+ required: false
+ aliases: []
+ choices: [ 'virtio', 'ide' ]
+ instance_os:
+ description:
+ - type of Operating System
+ default: null
+ required: false
+ aliases: [ vmos ]
+ instance_cores:
+ description:
+ - define the instance's number of cores
+ default: 1
+ required: false
+ aliases: [ vmcores ]
+ sdomain:
+ description:
+ - the Storage Domain where you want to create the instance's disk on.
+ default: null
+ required: false
+ aliases: []
+ region:
+ description:
+ - the oVirt/RHEV datacenter where you want to deploy to
+ default: null
+ required: false
+ aliases: []
+ instance_dns:
+ description:
+ - define the instance's Primary DNS server
+ required: false
+ aliases: [ dns ]
+ version_added: "2.1"
+ instance_domain:
+ description:
+ - define the instance's Domain
+ required: false
+ aliases: [ domain ]
+ version_added: "2.1"
+ instance_hostname:
+ description:
+ - define the instance's Hostname
+ required: false
+ aliases: [ hostname ]
+ version_added: "2.1"
+ instance_ip:
+ description:
+ - define the instance's IP
+ required: false
+ aliases: [ ip ]
+ version_added: "2.1"
+ instance_netmask:
+ description:
+ - define the instance's Netmask
+ required: false
+ aliases: [ netmask ]
+ version_added: "2.1"
+ instance_rootpw:
+ description:
+ - define the instance's Root password
+ required: false
+ aliases: [ rootpw ]
+ version_added: "2.1"
+ instance_key:
+ description:
+ - define the instance's Authorized key
+ required: false
+ aliases: [ key ]
+ version_added: "2.1"
+ state:
+ description:
+ - create, terminate or remove instances
+ default: 'present'
+ required: false
+ aliases: []
+    choices: ['present', 'absent', 'shutdown', 'started', 'restart']
+
+requirements:
+ - "python >= 2.6"
+ - "ovirt-engine-sdk-python"
+'''
+EXAMPLES = '''
+# Basic example provisioning from image.
+
+ovirt:
+ user: admin@internal
+ url: https://ovirt.example.com
+ instance_name: ansiblevm04
+ password: secret
+ image: centos_64
+ zone: cluster01
+  resource_type: template
+
+# Full example to create new instance from scratch
+ovirt:
+ instance_name: testansible
+ resource_type: new
+ instance_type: server
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+ instance_disksize: 10
+ zone: cluster01
+ region: datacenter1
+ instance_cpus: 1
+ instance_nic: nic1
+ instance_network: rhevm
+ instance_mem: 1000
+ disk_alloc: thin
+ sdomain: FIBER01
+ instance_cores: 1
+ instance_os: rhel_6x64
+  disk_int: virtio
+
+# stopping an instance
+ovirt:
+ instance_name: testansible
+  state: shutdown
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+
+# starting an instance
+ovirt:
+ instance_name: testansible
+ state: started
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+
+# starting an instance with cloud init information
+ovirt:
+ instance_name: testansible
+ state: started
+ user: admin@internal
+ password: secret
+ url: https://ovirt.example.com
+ hostname: testansible
+ domain: ansible.local
+ ip: 192.0.2.100
+ netmask: 255.255.255.0
+ gateway: 192.0.2.1
+ rootpw: bigsecret
+
+'''
+
+try:
+ from ovirtsdk.api import API
+ from ovirtsdk.xml import params
+ HAS_OVIRTSDK = True
+except ImportError:
+ HAS_OVIRTSDK = False
+
+# ------------------------------------------------------------------- #
+# create connection with API
+#
+def conn(url, user, password):
+ api = API(url=url, username=user, password=password, insecure=True)
+ try:
+ value = api.test()
+ except:
+ raise Exception("error connecting to the oVirt API")
+ return api
+
+# ------------------------------------------------------------------- #
+# Create VM from scratch
+def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
+ if vmdisk_alloc == 'thin':
+ # define VM params
+ vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype)
+ # define disk params
+ vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", format='cow',
+ storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
+ # define network parameters
+ network_net = params.Network(name=vmnetwork)
+ nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio')
+ elif vmdisk_alloc == 'preallocated':
+ # define VM params
+ vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))) ,type_=vmtype)
+ # define disk params
+ vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System", format='raw',
+ storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
+ # define network parameters
+ network_net = params.Network(name=vmnetwork)
+ nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')
+
+ try:
+ conn.vms.add(vmparams)
+ except:
+ raise Exception("Error creating VM with specified parameters")
+ vm = conn.vms.get(name=vmname)
+ try:
+ vm.disks.add(vmdisk)
+ except:
+ raise Exception("Error attaching disk")
+ try:
+ vm.nics.add(nic_net1)
+ except:
+ raise Exception("Error adding nic")
+
+
+# create an instance from a template
+def create_vm_template(conn, vmname, image, zone):
+ vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image),disks=params.Disks(clone=True))
+ try:
+ conn.vms.add(vmparams)
+ except:
+ raise Exception('error adding template %s' % image)
+
+
+# start instance
+def vm_start(conn, vmname, hostname=None, ip=None, netmask=None, gateway=None,
+ domain=None, dns=None, rootpw=None, key=None):
+ vm = conn.vms.get(name=vmname)
+ use_cloud_init = False
+ nics = None
+ nic = None
+ if hostname or ip or netmask or gateway or domain or dns or rootpw or key:
+ use_cloud_init = True
+ if ip and netmask and gateway:
+ ipinfo = params.IP(address=ip, netmask=netmask, gateway=gateway)
+ nic = params.GuestNicConfiguration(name='eth0', boot_protocol='STATIC', ip=ipinfo, on_boot=True)
+ nics = params.Nics()
+ nics = params.GuestNicsConfiguration(nic_configuration=[nic])
+ initialization=params.Initialization(regenerate_ssh_keys=True, host_name=hostname, domain=domain, user_name='root',
+ root_password=rootpw, nic_configurations=nics, dns_servers=dns,
+ authorized_ssh_keys=key)
+ action = params.Action(use_cloud_init=use_cloud_init, vm=params.VM(initialization=initialization))
+ vm.start(action=action)
+
+# Stop instance
+def vm_stop(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ vm.stop()
+
+# restart instance
+def vm_restart(conn, vmname):
+ state = vm_status(conn, vmname)
+ vm = conn.vms.get(name=vmname)
+ vm.stop()
+ while conn.vms.get(vmname).get_status().get_state() != 'down':
+ time.sleep(5)
+ vm.start()
+
+# remove an instance
+def vm_remove(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ vm.delete()
+
+# ------------------------------------------------------------------- #
+# VM statuses
+#
+# Get the VMs status
+def vm_status(conn, vmname):
+ status = conn.vms.get(name=vmname).status.state
+ return status
+
+
+# Get VM object and return it's name if object exists
+def get_vm(conn, vmname):
+ vm = conn.vms.get(name=vmname)
+ if vm == None:
+ name = "empty"
+ else:
+ name = vm.get_name()
+ return name
+
+# ------------------------------------------------------------------- #
+# Hypervisor operations
+#
+# not available yet
+# ------------------------------------------------------------------- #
+# Main
+
+def main():
+    """Entry point: parse module arguments, connect to oVirt, dispatch on 'state'."""
+
+    module = AnsibleModule(
+        argument_spec = dict(
+            # NOTE(review): DOCUMENTATION lists 'restarted' among the state
+            # choices but the spec (and the handler below) use 'restart' —
+            # confirm which spelling is intended.
+            state = dict(default='present', choices=['present', 'absent', 'shutdown', 'started', 'restart']),
+            #name = dict(required=True),
+            user = dict(required=True),
+            url = dict(required=True),
+            instance_name = dict(required=True, aliases=['vmname']),
+            password = dict(required=True, no_log=True),
+            image = dict(),
+            resource_type = dict(choices=['new', 'template']),
+            zone = dict(),
+            instance_disksize = dict(aliases=['vm_disksize']),
+            instance_cpus = dict(default=1, aliases=['vmcpus']),
+            instance_nic = dict(aliases=['vmnic']),
+            instance_network = dict(default='rhevm', aliases=['vmnetwork']),
+            instance_mem = dict(aliases=['vmmem']),
+            instance_type = dict(default='server', aliases=['vmtype'], choices=['server', 'desktop']),
+            disk_alloc = dict(default='thin', choices=['thin', 'preallocated']),
+            disk_int = dict(default='virtio', choices=['virtio', 'ide']),
+            instance_os = dict(aliases=['vmos']),
+            instance_cores = dict(default=1, aliases=['vmcores']),
+            instance_hostname = dict(aliases=['hostname']),
+            instance_ip = dict(aliases=['ip']),
+            instance_netmask = dict(aliases=['netmask']),
+            instance_gateway = dict(aliases=['gateway']),
+            instance_domain = dict(aliases=['domain']),
+            instance_dns = dict(aliases=['dns']),
+            instance_rootpw = dict(aliases=['rootpw']),
+            instance_key = dict(aliases=['key']),
+            sdomain = dict(),
+            region = dict(),
+        )
+    )
+
+    if not HAS_OVIRTSDK:
+        module.fail_json(msg='ovirtsdk required for this module')
+
+    state = module.params['state']
+    user = module.params['user']
+    url = module.params['url']
+    vmname = module.params['instance_name']
+    password = module.params['password']
+    image = module.params['image']                    # name of the image to deploy
+    resource_type = module.params['resource_type']    # template or from scratch
+    zone = module.params['zone']                      # oVirt cluster
+    vmdisk_size = module.params['instance_disksize']  # disksize
+    vmcpus = module.params['instance_cpus']           # number of cpu
+    vmnic = module.params['instance_nic']             # network interface
+    vmnetwork = module.params['instance_network']     # logical network
+    vmmem = module.params['instance_mem']             # mem size
+    vmdisk_alloc = module.params['disk_alloc']        # thin, preallocated
+    vmdisk_int = module.params['disk_int']            # disk interface virtio or ide
+    vmos = module.params['instance_os']               # Operating System
+    vmtype = module.params['instance_type']           # server or desktop
+    vmcores = module.params['instance_cores']         # number of cores
+    sdomain = module.params['sdomain']                # storage domain to store disk on
+    region = module.params['region']                  # oVirt Datacenter
+    hostname = module.params['instance_hostname']
+    ip = module.params['instance_ip']
+    netmask = module.params['instance_netmask']
+    gateway = module.params['instance_gateway']
+    domain = module.params['instance_domain']
+    dns = module.params['instance_dns']
+    rootpw = module.params['instance_rootpw']
+    key = module.params['instance_key']
+    # initialize connection; the REST API lives under <url>/api
+    try:
+        c = conn(url+"/api", user, password)
+    except Exception as e:
+        module.fail_json(msg='%s' % e)
+
+    # state=present: create the VM (from template or from scratch) only when
+    # it does not already exist.
+    if state == 'present':
+        if get_vm(c, vmname) == "empty":
+            if resource_type == 'template':
+                try:
+                    create_vm_template(c, vmname, image, zone)
+                except Exception as e:
+                    module.fail_json(msg='%s' % e)
+                module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname,image))
+            elif resource_type == 'new':
+                # FIXME: refactor, use keyword args.
+                try:
+                    create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int)
+                except Exception as e:
+                    module.fail_json(msg='%s' % e)
+                module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname)
+            else:
+                module.exit_json(changed=False, msg="You did not specify a resource type")
+        else:
+            module.exit_json(changed=False, msg="VM %s already exists" % vmname)
+
+    if state == 'started':
+        if vm_status(c, vmname) == 'up':
+            module.exit_json(changed=False, msg="VM %s is already running" % vmname)
+        else:
+            #vm_start(c, vmname)
+            vm_start(c, vmname, hostname, ip, netmask, gateway, domain, dns, rootpw, key)
+            module.exit_json(changed=True, msg="VM %s started" % vmname)
+
+    if state == 'shutdown':
+        if vm_status(c, vmname) == 'down':
+            module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname)
+        else:
+            vm_stop(c, vmname)
+            module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)
+
+    if state == 'restart':
+        if vm_status(c, vmname) == 'up':
+            vm_restart(c, vmname)
+            module.exit_json(changed=True, msg="VM %s is restarted" % vmname)
+        else:
+            module.exit_json(changed=False, msg="VM %s is not running" % vmname)
+
+    if state == 'absent':
+        if get_vm(c, vmname) == "empty":
+            module.exit_json(changed=False, msg="VM %s does not exist" % vmname)
+        else:
+            vm_remove(c, vmname)
+            module.exit_json(changed=True, msg="VM %s removed" % vmname)
+
+
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/misc/proxmox.py b/lib/ansible/modules/cloud/misc/proxmox.py
new file mode 100644
index 0000000000..c404519d49
--- /dev/null
+++ b/lib/ansible/modules/cloud/misc/proxmox.py
@@ -0,0 +1,591 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: proxmox
+short_description: management of instances in Proxmox VE cluster
+description:
+ - allows you to create/delete/stop instances in Proxmox VE cluster
+ - Starting in Ansible 2.1, it automatically detects containerization type (lxc for PVE 4, openvz for older)
+version_added: "2.0"
+options:
+ api_host:
+ description:
+ - the host of the Proxmox VE cluster
+ required: true
+ api_user:
+ description:
+ - the user to authenticate with
+ required: true
+ api_password:
+ description:
+ - the password to authenticate with
+ - you can use PROXMOX_PASSWORD environment variable
+ default: null
+ required: false
+ vmid:
+ description:
+ - the instance id
+ - if not set, the next available VM ID will be fetched from ProxmoxAPI.
+      - if not set, will be fetched from ProxmoxAPI based on the hostname
+ default: null
+ required: false
+ validate_certs:
+ description:
+ - enable / disable https certificate verification
+ default: false
+ required: false
+ type: boolean
+ node:
+ description:
+ - Proxmox VE node, when new VM will be created
+ - required only for C(state=present)
+ - for another states will be autodiscovered
+ default: null
+ required: false
+ pool:
+ description:
+ - Proxmox VE resource pool
+ default: null
+ required: false
+ version_added: "2.3"
+ password:
+ description:
+ - the instance root password
+ - required only for C(state=present)
+ default: null
+ required: false
+ hostname:
+ description:
+ - the instance hostname
+ - required only for C(state=present)
+ - must be unique if vmid is not passed
+ default: null
+ required: false
+ ostemplate:
+ description:
+ - the template for VM creating
+ - required only for C(state=present)
+ default: null
+ required: false
+ disk:
+ description:
+ - hard disk size in GB for instance
+ default: 3
+ required: false
+ cpus:
+ description:
+ - numbers of allocated cpus for instance
+ default: 1
+ required: false
+ memory:
+ description:
+ - memory size in MB for instance
+ default: 512
+ required: false
+ swap:
+ description:
+ - swap memory size in MB for instance
+ default: 0
+ required: false
+ netif:
+ description:
+ - specifies network interfaces for the container
+ default: null
+ required: false
+ type: A hash/dictionary defining interfaces
+ mounts:
+ description:
+ - specifies additional mounts (separate disks) for the container
+ default: null
+ required: false
+ type: A hash/dictionary defining mount points
+ version_added: "2.2"
+ ip_address:
+ description:
+ - specifies the address the container will be assigned
+ default: null
+ required: false
+ type: string
+ onboot:
+ description:
+ - specifies whether a VM will be started during system bootup
+ default: false
+ required: false
+ type: boolean
+ storage:
+ description:
+ - target storage
+ default: 'local'
+ required: false
+ type: string
+ cpuunits:
+ description:
+ - CPU weight for a VM
+ default: 1000
+ required: false
+ type: integer
+ nameserver:
+ description:
+ - sets DNS server IP address for a container
+ default: null
+ required: false
+ type: string
+ searchdomain:
+ description:
+ - sets DNS search domain for a container
+ default: null
+ required: false
+ type: string
+ timeout:
+ description:
+ - timeout for operations
+ default: 30
+ required: false
+ type: integer
+ force:
+ description:
+ - forcing operations
+ - can be used only with states C(present), C(stopped), C(restarted)
+ - with C(state=present) force option allow to overwrite existing container
+ - with states C(stopped) , C(restarted) allow to force stop instance
+ default: false
+ required: false
+ type: boolean
+ state:
+ description:
+ - Indicate desired state of the instance
+ choices: ['present', 'started', 'absent', 'stopped', 'restarted']
+ default: present
+notes:
+ - Requires proxmoxer and requests modules on host. This modules can be installed with pip.
+requirements: [ "proxmoxer", "python >= 2.7", "requests" ]
+author: "Sergei Antipov @UnderGreen"
+'''
+
+EXAMPLES = '''
+# Create new container with minimal options
+- proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+# Create new container automatically selecting the next available vmid.
+- proxmox: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+# Create new container with minimal options with force(it will rewrite existing container)
+- proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ force: yes
+
+# Create new container with minimal options use environment PROXMOX_PASSWORD variable(you should export it before)
+- proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+
+# Create new container with minimal options defining network interface with dhcp
+- proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}'
+
+# Create new container with minimal options defining network interface with static ip
+- proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+ ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}'
+
+# Create new container with minimal options defining a mount
+- proxmox:
+ vmid: 100
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ password: 123456
+ hostname: example.org
+    ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
+ mounts: '{"mp0":"local:8,mp=/mnt/test/"}'
+
+# Start container
+- proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: started
+
+# Stop container
+- proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: stopped
+
+# Stop container with force
+- proxmox:
+ vmid: 100
+ api_user: root@pam
+    api_password: 1q2w3e
+ api_host: node1
+ force: yes
+ state: stopped
+
+# Restart container(stopped or mounted container you can't restart)
+- proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+    state: restarted
+
+# Remove container
+- proxmox:
+ vmid: 100
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ state: absent
+'''
+
+import os
+import time
+
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+
+VZ_TYPE=None
+
+def get_nextvmid(proxmox):
+ try:
+ vmid = proxmox.cluster.nextid.get()
+ return vmid
+ except Exception as e:
+ module.fail_json(msg="Unable to get next vmid. Failed with exception: %s")
+
+def get_vmid(proxmox, hostname):
+ return [ vm['vmid'] for vm in proxmox.cluster.resources.get(type='vm') if vm['name'] == hostname ]
+
+def get_instance(proxmox, vmid):
+ return [ vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid) ]
+
+def content_check(proxmox, node, ostemplate, template_store):
+ return [ True for cnt in proxmox.nodes(node).storage(template_store).content.get() if cnt['volid'] == ostemplate ]
+
+def node_check(proxmox, node):
+ return [ True for nd in proxmox.nodes.get() if nd['node'] == node ]
+
+def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
+ proxmox_node = proxmox.nodes(node)
+ kwargs = dict((k,v) for k, v in kwargs.iteritems() if v is not None)
+ if VZ_TYPE =='lxc':
+ kwargs['cpulimit']=cpus
+ kwargs['rootfs']=disk
+ if 'netif' in kwargs:
+ kwargs.update(kwargs['netif'])
+ del kwargs['netif']
+ if 'mounts' in kwargs:
+ kwargs.update(kwargs['mounts'])
+ del kwargs['mounts']
+ else:
+ kwargs['cpus']=cpus
+ kwargs['disk']=disk
+ taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
+
+ while timeout:
+ if ( proxmox_node.tasks(taskid).status.get()['status'] == 'stopped'
+ and proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK' ):
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s'
+ % proxmox_node.tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+def start_instance(module, proxmox, vm, vmid, timeout):
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post()
+ while timeout:
+ if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
+ and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s'
+ % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+def stop_instance(module, proxmox, vm, vmid, timeout, force):
+ if force:
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
+ else:
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post()
+ while timeout:
+ if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
+ and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s'
+ % proxmox_node.tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+def umount_instance(module, proxmox, vm, vmid, timeout):
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.umount.post()
+ while timeout:
+ if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
+ and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
+ return True
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s'
+ % proxmox_node.tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ return False
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ api_host = dict(required=True),
+ api_user = dict(required=True),
+ api_password = dict(no_log=True),
+ vmid = dict(required=False),
+ validate_certs = dict(type='bool', default='no'),
+ node = dict(),
+ pool = dict(),
+ password = dict(no_log=True),
+ hostname = dict(),
+ ostemplate = dict(),
+ disk = dict(type='str', default='3'),
+ cpus = dict(type='int', default=1),
+ memory = dict(type='int', default=512),
+ swap = dict(type='int', default=0),
+ netif = dict(type='dict'),
+ mounts = dict(type='dict'),
+ ip_address = dict(),
+ onboot = dict(type='bool', default='no'),
+ storage = dict(default='local'),
+ cpuunits = dict(type='int', default=1000),
+ nameserver = dict(),
+ searchdomain = dict(),
+ timeout = dict(type='int', default=30),
+ force = dict(type='bool', default='no'),
+ state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
+ )
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg='proxmoxer required for this module')
+
+ state = module.params['state']
+ api_user = module.params['api_user']
+ api_host = module.params['api_host']
+ api_password = module.params['api_password']
+ vmid = module.params['vmid']
+ validate_certs = module.params['validate_certs']
+ node = module.params['node']
+ disk = module.params['disk']
+ cpus = module.params['cpus']
+ memory = module.params['memory']
+ swap = module.params['swap']
+ storage = module.params['storage']
+ hostname = module.params['hostname']
+ if module.params['ostemplate'] is not None:
+ template_store = module.params['ostemplate'].split(":")[0]
+ timeout = module.params['timeout']
+
+ # If password not set get it from PROXMOX_PASSWORD env
+ if not api_password:
+ try:
+ api_password = os.environ['PROXMOX_PASSWORD']
+ except KeyError as e:
+ module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
+
+ try:
+ proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
+ global VZ_TYPE
+ VZ_TYPE = 'openvz' if float(proxmox.version.get()['version']) < 4.0 else 'lxc'
+
+ except Exception as e:
+ module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
+
+ # If vmid not set get the Next VM id from ProxmoxAPI
+ # If hostname is set get the VM id from ProxmoxAPI
+ if not vmid and state == 'present':
+ vmid = get_nextvmid(proxmox)
+ elif not vmid and hostname:
+ vmid = get_vmid(proxmox, hostname)[0]
+ elif not vmid:
+ module.exit_json(changed=False, msg="Vmid could not be fetched for the following action: %s" % state)
+
+ if state == 'present':
+ try:
+ if get_instance(proxmox, vmid) and not module.params['force']:
+ module.exit_json(changed=False, msg="VM with vmid = %s is already exists" % vmid)
+ # If no vmid was passed, there cannot be another VM named 'hostname'
+ if not module.params['vmid'] and get_vmid(proxmox, hostname) and not module.params['force']:
+ module.exit_json(changed=False, msg="VM with hostname %s already exists and has ID number %s" % (hostname, get_vmid(proxmox, hostname)[0]))
+ elif not (node, module.params['hostname'] and module.params['password'] and module.params['ostemplate']):
+ module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm')
+ elif not node_check(proxmox, node):
+ module.fail_json(msg="node '%s' not exists in cluster" % node)
+ elif not content_check(proxmox, node, module.params['ostemplate'], template_store):
+ module.fail_json(msg="ostemplate '%s' not exists on node %s and storage %s"
+ % (module.params['ostemplate'], node, template_store))
+
+ create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
+ pool = module.params['pool'],
+ password = module.params['password'],
+ hostname = module.params['hostname'],
+ ostemplate = module.params['ostemplate'],
+ netif = module.params['netif'],
+ mounts = module.params['mounts'],
+ ip_address = module.params['ip_address'],
+ onboot = int(module.params['onboot']),
+ cpuunits = module.params['cpuunits'],
+ nameserver = module.params['nameserver'],
+ searchdomain = module.params['searchdomain'],
+ force = int(module.params['force']))
+
+ module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
+ except Exception as e:
+ module.fail_json(msg="creation of %s VM %s failed with exception: %s" % ( VZ_TYPE, vmid, e ))
+
+ elif state == 'started':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid)
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is already running" % vmid)
+
+ if start_instance(module, proxmox, vm, vmid, timeout):
+ module.exit_json(changed=True, msg="VM %s started" % vmid)
+ except Exception as e:
+ module.fail_json(msg="starting of VM %s failed with exception: %s" % ( vmid, e ))
+
+ elif state == 'stopped':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
+ if module.params['force']:
+ if umount_instance(module, proxmox, vm, vmid, timeout):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ else:
+ module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
+ "You can use force option to umount it.") % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
+ module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)
+
+ if stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ except Exception as e:
+ module.fail_json(msg="stopping of VM %s failed with exception: %s" % ( vmid, e ))
+
+ elif state == 'restarted':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s not exists in cluster' % vmid)
+ if ( getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped'
+ or getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted' ):
+ module.exit_json(changed=False, msg="VM %s is not running" % vmid)
+
+ if ( stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']) and
+ start_instance(module, proxmox, vm, vmid, timeout) ):
+ module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
+ except Exception as e:
+ module.fail_json(msg="restarting of VM %s failed with exception: %s" % ( vmid, e ))
+
+ elif state == 'absent':
+ try:
+ vm = get_instance(proxmox, vmid)
+ if not vm:
+ module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
+ module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
+
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
+ while timeout:
+ if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
+ and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
+ module.exit_json(changed=True, msg="VM %s removed" % vmid)
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
+ % proxmox_node.tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ except Exception as e:
+ module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e ))
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/misc/proxmox_kvm.py b/lib/ansible/modules/cloud/misc/proxmox_kvm.py
new file mode 100644
index 0000000000..e77f266b42
--- /dev/null
+++ b/lib/ansible/modules/cloud/misc/proxmox_kvm.py
@@ -0,0 +1,1058 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Abdoul Bah (@helldorado) <abdoul.bah at alterway.fr>
+
+"""
+Ansible module to manage Qemu(KVM) instance in Proxmox VE cluster.
+This module is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+This software is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+You should have received a copy of the GNU General Public License
+along with this software. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: proxmox_kvm
+short_description: Management of Qemu(KVM) Virtual Machines in Proxmox VE cluster.
+description:
+ - Allows you to create/delete/stop Qemu(KVM) Virtual Machines in Proxmox VE cluster.
+version_added: "2.3"
+author: "Abdoul Bah (@helldorado) <abdoul.bah at alterway.fr>"
+options:
+ acpi:
+ description:
+      - Specify if ACPI should be enabled/disabled.
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ type: boolean
+ agent:
+ description:
+ - Specify if the QEMU GuestAgent should be enabled/disabled.
+ required: false
+ default: null
+ choices: [ "yes", "no" ]
+ type: boolean
+ args:
+ description:
+ - Pass arbitrary arguments to kvm.
+ - This option is for experts only!
+ default: "-serial unix:/var/run/qemu-server/VMID.serial,server,nowait"
+ required: false
+ type: string
+ api_host:
+ description:
+ - Specify the target host of the Proxmox VE cluster.
+ required: true
+ api_user:
+ description:
+ - Specify the user to authenticate with.
+ required: true
+ api_password:
+ description:
+ - Specify the password to authenticate with.
+ - You can use C(PROXMOX_PASSWORD) environment variable.
+ default: null
+ required: false
+ autostart:
+ description:
+ - Specify, if the VM should be automatically restarted after crash (currently ignored in PVE API).
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ type: boolean
+ balloon:
+ description:
+ - Specify the amount of RAM for the VM in MB.
+ - Using zero disables the balloon driver.
+ required: false
+ default: 0
+ type: integer
+ bios:
+ description:
+ - Specify the BIOS implementation.
+ choices: ['seabios', 'ovmf']
+ required: false
+ default: null
+ type: string
+ boot:
+ description:
+ - Specify the boot order -> boot on floppy C(a), hard disk C(c), CD-ROM C(d), or network C(n).
+ - You can combine to set order.
+ required: false
+ default: cnd
+ type: string
+ bootdisk:
+ description:
+ - Enable booting from specified disk. C((ide|sata|scsi|virtio)\d+)
+ required: false
+ default: null
+ type: string
+ cores:
+ description:
+ - Specify number of cores per socket.
+ required: false
+ default: 1
+ type: integer
+ cpu:
+ description:
+ - Specify emulated CPU type.
+ required: false
+ default: kvm64
+ type: string
+ cpulimit:
+ description:
+ - Specify if CPU usage will be limited. Value 0 indicates no CPU limit.
+ - If the computer has 2 CPUs, it has total of '2' CPU time
+ required: false
+ default: null
+ type: integer
+ cpuunits:
+ description:
+ - Specify CPU weight for a VM.
+ - You can disable fair-scheduler configuration by setting this to 0
+ default: 1000
+ required: false
+ type: integer
+ delete:
+ description:
+ - Specify a list of settings you want to delete.
+ required: false
+ default: null
+ type: string
+ description:
+ description:
+ - Specify the description for the VM. Only used on the configuration web interface.
+ - This is saved as comment inside the configuration file.
+ required: false
+ default: null
+ type: string
+ digest:
+ description:
+ - Specify if to prevent changes if current configuration file has different SHA1 digest.
+ - This can be used to prevent concurrent modifications.
+ required: false
+ default: null
+ type: string
+ force:
+ description:
+ - Allow to force stop VM.
+ - Can be used only with states C(stopped), C(restarted).
+ default: null
+ choices: [ "yes", "no" ]
+ required: false
+ type: boolean
+ freeze:
+ description:
+ - Specify if PVE should freeze CPU at startup (use 'c' monitor command to start execution).
+ required: false
+ default: null
+ choices: [ "yes", "no" ]
+ type: boolean
+ hostpci:
+ description:
+ - Specify a hash/dictionary of map host pci devices into guest. C(hostpci='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(hostpci[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C("host="HOSTPCIID[;HOSTPCIID2...]",pcie="1|0",rombar="1|0",x-vga="1|0"").
+ - The C(host) parameter is Host PCI device pass through. HOSTPCIID syntax is C(bus:dev.func) (hexadecimal numbers).
+ - C(pcie=boolean) I(default=0) Choose the PCI-express bus (needs the q35 machine model).
+ - C(rombar=boolean) I(default=1) Specify whether or not the device’s ROM will be visible in the guest’s memory map.
+ - C(x-vga=boolean) I(default=0) Enable vfio-vga device support.
+ - /!\ This option allows direct access to host hardware. So it is no longer possible to migrate such machines - use with special care.
+ required: false
+ default: null
+ type: A hash/dictionary defining host pci devices
+ hotplug:
+ description:
+ - Selectively enable hotplug features.
+ - This is a comma separated list of hotplug features C('network', 'disk', 'cpu', 'memory' and 'usb').
+ - Value 0 disables hotplug completely and value 1 is an alias for the default C('network,disk,usb').
+ required: false
+ default: null
+ type: string
+ hugepages:
+ description:
+ - Enable/disable hugepages memory.
+ choices: ['any', '2', '1024']
+ required: false
+ default: null
+ type: string
+ ide:
+ description:
+ - A hash/dictionary of volume used as IDE hard disk or CD-ROM. C(ide='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(ide[n]) where 0 ≤ n ≤ 3.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive’s backing file’s data format. C(qcow2|raw|subvol).
+ required: false
+ default: null
+ type: A hash/dictionary defining ide
+ keyboard:
+ description:
+ - Sets the keyboard layout for VNC server.
+ required: false
+ default: null
+ type: string
+ kvm:
+ description:
+ - Enable/disable KVM hardware virtualization.
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ type: boolean
+ localtime:
+ description:
+ - Sets the real time clock to local time.
+ - This is enabled by default if ostype indicates a Microsoft OS.
+ required: false
+ default: null
+ choices: [ "yes", "no" ]
+ type: boolean
+ lock:
+ description:
+ - Lock/unlock the VM.
+ choices: ['migrate', 'backup', 'snapshot', 'rollback']
+ required: false
+ default: null
+ type: string
+ machine:
+ description:
+ - Specifies the Qemu machine type.
+ - type => C((pc|pc(-i440fx)?-\d+\.\d+(\.pxe)?|q35|pc-q35-\d+\.\d+(\.pxe)?))
+ required: false
+ default: null
+ type: string
+ memory:
+ description:
+ - Memory size in MB for instance.
+ required: false
+ default: 512
+ type: integer
+ migrate_downtime:
+ description:
+ - Sets maximum tolerated downtime (in seconds) for migrations.
+ required: false
+ default: null
+ type: integer
+ migrate_speed:
+ description:
+ - Sets maximum speed (in MB/s) for migrations.
+ - A value of 0 is no limit.
+ required: false
+ default: null
+ type: integer
+ name:
+ description:
+ - Specifies the VM name. Only used on the configuration web interface.
+ - Required only for C(state=present).
+ default: null
+ required: false
+ net:
+ description:
+ - A hash/dictionary of network interfaces for the VM. C(net='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(net[n]) where 0 ≤ n ≤ N.
+      - Values allowed are - C("model="XX:XX:XX:XX:XX:XX",bridge="value",rate="value",tag="value",firewall="1|0",trunks="vlanid"").
+ - Model is one of C(e1000 e1000-82540em e1000-82544gc e1000-82545em i82551 i82557b i82559er ne2k_isa ne2k_pci pcnet rtl8139 virtio vmxnet3).
+ - C(XX:XX:XX:XX:XX:XX) should be an unique MAC address. This is automatically generated if not specified.
+ - The C(bridge) parameter can be used to automatically add the interface to a bridge device. The Proxmox VE standard bridge is called 'vmbr0'.
+ - Option C(rate) is used to limit traffic bandwidth from and to this interface. It is specified as floating point number, unit is 'Megabytes per second'.
+ - If you specify no bridge, we create a kvm 'user' (NATed) network device, which provides DHCP and DNS services.
+ default: null
+ required: false
+ type: A hash/dictionary defining interfaces
+ node:
+ description:
+ - Proxmox VE node, where the new VM will be created.
+ - Only required for C(state=present).
+ - For other states, it will be autodiscovered.
+ default: null
+ required: false
+ numa:
+ description:
+ - A hash/dictionaries of NUMA topology. C(numa='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(numa[n]) where 0 ≤ n ≤ N.
+ - Values allowed are - C("cpu="<id[-id];...>",hostnodes="<id[-id];...>",memory="number",policy="(bind|interleave|preferred)"").
+ - C(cpus) CPUs accessing this NUMA node.
+ - C(hostnodes) Host NUMA nodes to use.
+ - C(memory) Amount of memory this NUMA node provides.
+ - C(policy) NUMA allocation policy.
+ default: null
+ required: false
+ type: A hash/dictionary defining NUMA topology
+ onboot:
+ description:
+ - Specifies whether a VM will be started during system bootup.
+ default: "yes"
+ choices: [ "yes", "no" ]
+ required: false
+ type: boolean
+ ostype:
+ description:
+ - Specifies guest operating system. This is used to enable special optimization/features for specific operating systems.
+ - The l26 is Linux 2.6/3.X Kernel.
+ choices: ['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'l24', 'l26', 'solaris']
+ default: l26
+ required: false
+ type: string
+ parallel:
+ description:
+ - A hash/dictionary of map host parallel devices. C(parallel='{"key":"value", "key":"value"}').
+ - Keys allowed are - (parallel[n]) where 0 ≤ n ≤ 2.
+ - Values allowed are - C("/dev/parport\d+|/dev/usb/lp\d+").
+ default: null
+ required: false
+ type: A hash/dictionary defining host parallel devices
+ protection:
+ description:
+ - Enable/disable the protection flag of the VM. This will enable/disable the remove VM and remove disk operations.
+ default: null
+ choices: [ "yes", "no" ]
+ required: false
+ type: boolean
+ reboot:
+ description:
+ - Allow reboot. If set to yes, the VM exit on reboot.
+ default: null
+ choices: [ "yes", "no" ]
+ required: false
+ type: boolean
+ revert:
+ description:
+ - Revert a pending change.
+ default: null
+ required: false
+ type: string
+ sata:
+ description:
+ - A hash/dictionary of volume used as sata hard disk or CD-ROM. C(sata='{"key":"value", "key":"value"}').
+ - Keys allowed are - C(sata[n]) where 0 ≤ n ≤ 5.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive’s backing file’s data format. C(qcow2|raw|subvol).
+ default: null
+ required: false
+ type: A hash/dictionary defining sata
+ scsi:
+ description:
+ - A hash/dictionary of volume used as SCSI hard disk or CD-ROM. C(scsi='{"key":"value", "key":"value"}').
+      - Keys allowed are - C(scsi[n]) where 0 ≤ n ≤ 13.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive’s backing file’s data format. C(qcow2|raw|subvol).
+ default: null
+ required: false
+ type: A hash/dictionary defining scsi
+ scsihw:
+ description:
+ - Specifies the SCSI controller model.
+ choices: ['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']
+ required: false
+ default: null
+ type: string
+ serial:
+ description:
+ - A hash/dictionary of serial device to create inside the VM. C('{"key":"value", "key":"value"}').
+ - Keys allowed are - serial[n](str; required) where 0 ≤ n ≤ 3.
+ - Values allowed are - C((/dev/.+|socket)).
+ - /!\ If you pass through a host serial device, it is no longer possible to migrate such machines - use with special care.
+ default: null
+ required: false
+ type: A hash/dictionary defining serial
+ shares:
+ description:
+      - Sets amount of memory shares for auto-ballooning. (0 - 50000).
+ - The larger the number is, the more memory this VM gets.
+ - The number is relative to weights of all other running VMs.
+ - Using 0 disables auto-ballooning, this means no limit.
+ required: false
+ default: null
+ type: integer
+ skiplock:
+ description:
+ - Ignore locks
+ - Only root is allowed to use this option.
+ required: false
+ default: null
+ choices: [ "yes", "no" ]
+ type: boolean
+ smbios:
+ description:
+ - Specifies SMBIOS type 1 fields.
+ required: false
+ default: null
+ type: string
+ sockets:
+ description:
+ - Sets the number of CPU sockets. (1 - N).
+ required: false
+ default: 1
+ type: integer
+ startdate:
+ description:
+ - Sets the initial date of the real time clock.
+ - Valid format for date are C('now') or C('2016-09-25T16:01:21') or C('2016-09-25').
+ required: false
+ default: null
+ type: string
+ startup:
+ description:
+ - Startup and shutdown behavior. C([[order=]\d+] [,up=\d+] [,down=\d+]).
+ - Order is a non-negative number defining the general startup order.
+ - Shutdown in done with reverse ordering.
+ required: false
+ default: null
+ type: string
+ state:
+ description:
+ - Indicates desired state of the instance.
+      - If C(current), the current state of the VM will be fetched. You can access it with C(results.status)
+ choices: ['present', 'started', 'absent', 'stopped', 'restarted','current']
+ required: false
+ default: present
+ tablet:
+ description:
+ - Enables/disables the USB tablet device.
+ required: false
+ choices: [ "yes", "no" ]
+ default: "no"
+ type: boolean
+ tdf:
+ description:
+ - Enables/disables time drift fix.
+ required: false
+ default: null
+ choices: [ "yes", "no" ]
+ type: boolean
+ template:
+ description:
+ - Enables/disables the template.
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ type: boolean
+ timeout:
+ description:
+ - Timeout for operations.
+ default: 30
+ required: false
+ type: integer
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used on personally controlled sites using self-signed certificates.
+ default: "no"
+ choices: [ "yes", "no" ]
+ required: false
+ type: boolean
+ vcpus:
+ description:
+ - Sets number of hotplugged vcpus.
+ required: false
+ default: null
+ type: integer
+ vga:
+ description:
+ - Select VGA type. If you want to use high resolution modes (>= 1280x1024x16) then you should use option 'std' or 'vmware'.
+ choices: ['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']
+ required: false
+ default: std
+ virtio:
+ description:
+ - A hash/dictionary of volume used as VIRTIO hard disk. C(virtio='{"key":"value", "key":"value"}').
+      - Keys allowed are - C(virtio[n]) where 0 ≤ n ≤ 15.
+ - Values allowed are - C("storage:size,format=value").
+ - C(storage) is the storage identifier where to create the disk.
+ - C(size) is the size of the disk in GB.
+ - C(format) is the drive’s backing file’s data format. C(qcow2|raw|subvol).
+ required: false
+ default: null
+ type: A hash/dictionary defining virtio
+ vmid:
+ description:
+ - Specifies the VM ID. Instead use I(name) parameter.
+ - If vmid is not set, the next available VM ID will be fetched from ProxmoxAPI.
+ default: null
+ required: false
+ watchdog:
+ description:
+ - Creates a virtual hardware watchdog device.
+ required: false
+ default: null
+ type: string
+Notes:
+ - Requires proxmoxer and requests modules on host. This modules can be installed with pip.
+requirements: [ "proxmoxer", "requests" ]
+'''
+
+EXAMPLES = '''
+# Create new VM with minimal options
+- proxmox_kvm:
+ api_user : root@pam
+ api_password: secret
+ api_host : helldorado
+ name : spynal
+ node : sabrewulf
+
+# Create new VM with minimal options and given vmid
+- proxmox_kvm:
+ api_user : root@pam
+ api_password: secret
+ api_host : helldorado
+ name : spynal
+ node : sabrewulf
+ vmid : 100
+
+# Create new VM with two network interface options.
+- proxmox_kvm:
+ api_user : root@pam
+ api_password: secret
+ api_host : helldorado
+ name : spynal
+ node : sabrewulf
+ net : '{"net0":"virtio,bridge=vmbr1,rate=200", "net1":"e1000,bridge=vmbr2,"}'
+
+# Create new VM with one network interface, three virtio hard disks, 4 cores, and 2 vcpus.
+- proxmox_kvm:
+ api_user : root@pam
+ api_password: secret
+ api_host : helldorado
+ name : spynal
+ node : sabrewulf
+ net : '{"net0":"virtio,bridge=vmbr1,rate=200"}'
+ virtio : '{"virtio0":"VMs_LVM:10", "virtio1":"VMs:2,format=qcow2", "virtio2":"VMs:5,format=raw"}'
+ cores : 4
+ vcpus : 2
+
+# Create new VM and lock it for snapshot.
+- proxmox_kvm:
+ api_user : root@pam
+ api_password: secret
+ api_host : helldorado
+ name : spynal
+ node : sabrewulf
+ lock : snapshot
+
+# Create new VM and set protection to disable the remove VM and remove disk operations
+- proxmox_kvm:
+ api_user : root@pam
+ api_password: secret
+ api_host : helldorado
+ name : spynal
+ node : sabrewulf
+ protection : yes
+
+# Start VM
+- proxmox_kvm:
+ api_user : root@pam
+ api_password: secret
+ api_host : helldorado
+ name : spynal
+ node : sabrewulf
+ state : started
+
+# Stop VM
+- proxmox_kvm:
+ api_user : root@pam
+ api_password: secret
+ api_host : helldorado
+ name : spynal
+ node : sabrewulf
+ state : stopped
+
+# Stop VM with force
+- proxmox_kvm:
+ api_user : root@pam
+ api_password: secret
+ api_host : helldorado
+ name : spynal
+ node : sabrewulf
+ state : stopped
+ force : yes
+
+# Restart VM
+- proxmox_kvm:
+ api_user : root@pam
+ api_password: secret
+ api_host : helldorado
+ name : spynal
+ node : sabrewulf
+ state : restarted
+
+# Remove VM
+- proxmox_kvm:
+ api_user : root@pam
+ api_password: secret
+ api_host : helldorado
+ name : spynal
+ node : sabrewulf
+ state : absent
+
+# Get VM current state
+- proxmox_kvm:
+ api_user : root@pam
+ api_password: secret
+ api_host : helldorado
+ name : spynal
+ node : sabrewulf
+ state : current
+'''
+
+RETURN = '''
+devices:
+ description: The list of devices created or used.
+ returned: success
+ type: dict
+ sample: '
+ {
+ "ide0": "VMS_LVM:vm-115-disk-1",
+ "ide1": "VMs:115/vm-115-disk-3.raw",
+ "virtio0": "VMS_LVM:vm-115-disk-2",
+ "virtio1": "VMs:115/vm-115-disk-1.qcow2",
+ "virtio2": "VMs:115/vm-115-disk-2.raw"
+ }'
+mac:
+ description: List of mac address created and net[n] attached. Useful when you want to use provision systems like Foreman via PXE.
+ returned: success
+ type: dict
+ sample: '
+ {
+ "net0": "3E:6E:97:D2:31:9F",
+ "net1": "B6:A1:FC:EF:78:A4"
+ }'
+vmid:
+ description: The VM vmid.
+ returned: success
+ type: int
+ sample: 115
+status:
+ description:
+ - The current virtual machine status.
+ - Returned only when C(state=current)
+ returned: success
+ type: dict
+ sample: '{
+ "changed": false,
+ "msg": "VM kropta with vmid = 110 is running",
+ "status": "running"
+ }'
+'''
+
import os
import re
import time
+
+
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+
# API endpoint selector: getattr(proxmox.nodes(node), VZ_TYPE) picks the
# per-guest-type resource tree; this module manages Qemu/KVM guests.
VZ_TYPE='qemu'
+
def get_nextvmid(proxmox):
    """Return the next free VM id reported by the cluster API.

    On API failure, aborts the module run via fail_json.
    """
    try:
        return proxmox.cluster.nextid.get()
    except Exception as e:
        # NOTE(review): 'module' is not defined in this function's scope (it is
        # a local of main()), so reaching this handler raises NameError instead
        # of a clean failure -- confirm whether 'module' should be passed in.
        # Bug fix: the original message contained a bare '%s' that was never
        # interpolated with the exception.
        module.fail_json(msg="Unable to get next vmid. Failed with exception: %s" % e)
+
def get_vmid(proxmox, name):
    """Return the vmids of every cluster VM whose name equals *name*."""
    matches = []
    for resource in proxmox.cluster.resources.get(type='vm'):
        if resource['name'] == name:
            matches.append(resource['vmid'])
    return matches
+
def get_vm(proxmox, vmid):
    """Return the cluster resource entries whose vmid matches *vmid* (compared as int)."""
    wanted = int(vmid)
    found = []
    for resource in proxmox.cluster.resources.get(type='vm'):
        if resource['vmid'] == wanted:
            found.append(resource)
    return found
+
def node_check(proxmox, node):
    """Return a non-empty (truthy) list iff *node* exists in the cluster.

    Kept as a list of True values, not a bool, to preserve the original
    return type used by callers in boolean context.
    """
    hits = []
    for nd in proxmox.nodes.get():
        if nd['node'] == node:
            hits.append(True)
    return hits
+
def get_vminfo(module, proxmox, node, vmid, **kwargs):
    """Collect MAC addresses and disk devices of VM *vmid* into the module-level
    ``results`` dict (also returned, for convenience and testability).

    For every net[n] key the caller passed, the MAC is extracted from the live
    VM config; for every ide[n]/sata[n]/scsi[n]/virtio[n] key, the backing
    volume is extracted. Calls fail_json if the config cannot be read.
    """
    global results
    results = {}
    mac = {}
    devices = {}
    try:
        vm = proxmox.nodes(node).qemu(vmid).config.get()
    except Exception as e:
        module.fail_json(msg='Getting information for VM with vmid = %s failed with exception: %s' % (vmid, e))

    # Drop unset args. items() (not the Py2-only iteritems()) keeps this
    # working on both Python 2 and 3.
    kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)

    # Flatten dict-valued options (hostpci[n], ide[n], net[n], numa[n],
    # parallel[n], sata[n], scsi[n], serial[n], virtio[n]) into top-level
    # keys. Iterate a snapshot of the keys because entries are deleted.
    for k in list(kwargs.keys()):
        if isinstance(kwargs[k], dict):
            kwargs.update(kwargs[k])
            del kwargs[k]

    # Split information by type.
    for k, v in kwargs.items():
        if re.match(r'net[0-9]', k) is not None:
            interface = k
            k = vm[k]
            # net[n] config looks like 'model=MAC,bridge=...' -> grab the MAC.
            k = re.search('=(.*?),', k).group(1)
            mac[interface] = k
        if (re.match(r'virtio[0-9]', k) is not None
                or re.match(r'ide[0-9]', k) is not None
                or re.match(r'scsi[0-9]', k) is not None
                or re.match(r'sata[0-9]', k) is not None):
            device = k
            k = vm[k]
            # disk config looks like 'storage:volume,options' -> grab the volume.
            k = re.search('(.*?),', k).group(1)
            devices[device] = k

    results['mac'] = mac
    results['devices'] = devices
    results['vmid'] = int(vmid)
    return results
+
def create_vm(module, proxmox, vmid, node, name, memory, cpu, cores, sockets, timeout, **kwargs):
    """Create a new Qemu(KVM) VM and wait for the creation task to complete.

    Returns True when the task finishes with exitstatus OK, False if *timeout*
    was 0, and calls module.fail_json once the timeout is exhausted.
    """
    # Options only accepted by the PVE 4 API.
    only_v4 = ['force', 'protection', 'skiplock']
    # Default -args value. Note: -args is for experts only; it passes arbitrary
    # arguments straight through to kvm.
    vm_args = "-serial unix:/var/run/qemu-server/{}.serial,server,nowait".format(vmid)

    proxmox_node = proxmox.nodes(node)

    # Sanitize kwargs: drop unset options and convert booleans to 0/1 as the
    # API expects. items() (not the Py2-only iteritems()) keeps this working
    # on both Python 2 and 3.
    kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
    kwargs.update(dict([k, int(v)] for k, v in kwargs.items() if isinstance(v, bool)))

    # Strip PVE-4-only options when talking to an older cluster.
    if PVE_MAJOR_VERSION < 4:
        for p in only_v4:
            if p in kwargs:
                del kwargs[p]

    # Flatten dict-valued options (hostpci[n], ide[n], net[n], numa[n],
    # parallel[n], sata[n], scsi[n], serial[n], virtio[n]) into top-level
    # keys. Iterate a snapshot of the keys because entries are deleted.
    for k in list(kwargs.keys()):
        if isinstance(kwargs[k], dict):
            kwargs.update(kwargs[k])
            del kwargs[k]

    # -args and skiplock require the root@pam user.
    if module.params['api_user'] == "root@pam" and module.params['args'] is None:
        kwargs['args'] = vm_args
    elif module.params['api_user'] == "root@pam" and module.params['args'] is not None:
        kwargs['args'] = module.params['args']
    elif module.params['api_user'] != "root@pam" and module.params['args'] is not None:
        module.fail_json(msg='args parameter require root@pam user. ')

    if module.params['api_user'] != "root@pam" and module.params['skiplock'] is not None:
        module.fail_json(msg='skiplock parameter require root@pam user. ')

    taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, name=name, memory=memory, cpu=cpu, cores=cores, sockets=sockets, **kwargs)

    # Poll the task until it finishes or the timeout runs out.
    while timeout:
        if (proxmox_node.tasks(taskid).status.get()['status'] == 'stopped'
                and proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK'):
            return True
        timeout = timeout - 1
        if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s'
                             % proxmox_node.tasks(taskid).log.get()[:1])
        time.sleep(1)
    return False
+
def start_vm(module, proxmox, vm, vmid, timeout):
    """Start VM *vmid* and poll the start task until it reports success.

    Returns True on success, False if *timeout* was 0; fail_json after
    *timeout* polls (one per second).
    """
    vm_node = vm[0]['node']
    taskid = getattr(proxmox.nodes(vm_node), VZ_TYPE)(vmid).status.start.post()
    while timeout:
        task_state = proxmox.nodes(vm_node).tasks(taskid).status.get()
        if task_state['status'] == 'stopped' and task_state['exitstatus'] == 'OK':
            return True
        timeout -= 1
        if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s'
                             % proxmox.nodes(vm_node).tasks(taskid).log.get()[:1])

        time.sleep(1)
    return False
+
def stop_vm(module, proxmox, vm, vmid, timeout, force):
    """Shut VM *vmid* down (forcibly when *force* is truthy) and wait for the task.

    Returns True on success, False if *timeout* was 0; fail_json after
    *timeout* polls (one per second).
    """
    vm_node = vm[0]['node']
    shutdown = getattr(proxmox.nodes(vm_node), VZ_TYPE)(vmid).status.shutdown
    taskid = shutdown.post(forceStop=1) if force else shutdown.post()
    while timeout:
        task_state = proxmox.nodes(vm_node).tasks(taskid).status.get()
        if task_state['status'] == 'stopped' and task_state['exitstatus'] == 'OK':
            return True
        timeout -= 1
        if timeout == 0:
            module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s'
                             % proxmox.nodes(vm_node).tasks(taskid).log.get()[:1])

        time.sleep(1)
    return False
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ acpi = dict(type='bool', default='yes'),
+ agent = dict(type='bool'),
+ args = dict(type='str', default=None),
+ api_host = dict(required=True),
+ api_user = dict(required=True),
+ api_password = dict(no_log=True),
+ autostart = dict(type='bool', default='no'),
+ balloon = dict(type='int',default=0),
+ bios = dict(choices=['seabios', 'ovmf']),
+ boot = dict(type='str', default='cnd'),
+ bootdisk = dict(type='str'),
+ cores = dict(type='int', default=1),
+ cpu = dict(type='str', default='kvm64'),
+ cpulimit = dict(type='int'),
+ cpuunits = dict(type='int', default=1000),
+ delete = dict(type='str'),
+ description = dict(type='str'),
+ digest = dict(type='str'),
+ force = dict(type='bool', default=None),
+ freeze = dict(type='bool'),
+ hostpci = dict(type='dict'),
+ hotplug = dict(type='str'),
+ hugepages = dict(choices=['any', '2', '1024']),
+ ide = dict(type='dict', default=None),
+ keyboard = dict(type='str'),
+ kvm = dict(type='bool', default='yes'),
+ localtime = dict(type='bool'),
+ lock = dict(choices=['migrate', 'backup', 'snapshot', 'rollback']),
+ machine = dict(type='str'),
+ memory = dict(type='int', default=512),
+ migrate_downtime = dict(type='int'),
+ migrate_speed = dict(type='int'),
+ name = dict(type='str'),
+ net = dict(type='dict'),
+ node = dict(),
+ numa = dict(type='dict'),
+ onboot = dict(type='bool', default='yes'),
+ ostype = dict(default='l26', choices=['other', 'wxp', 'w2k', 'w2k3', 'w2k8', 'wvista', 'win7', 'win8', 'l24', 'l26', 'solaris']),
+ parallel = dict(type='dict'),
+ protection = dict(type='bool'),
+ reboot = dict(type='bool'),
+ revert = dict(),
+ sata = dict(type='dict'),
+ scsi = dict(type='dict'),
+ scsihw = dict(choices=['lsi', 'lsi53c810', 'virtio-scsi-pci', 'virtio-scsi-single', 'megasas', 'pvscsi']),
+ serial = dict(type='dict'),
+ shares = dict(type='int'),
+ skiplock = dict(type='bool'),
+ smbios = dict(type='str'),
+ sockets = dict(type='int', default=1),
+ startdate = dict(type='str'),
+ startup = dict(),
+ state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted', 'current']),
+ tablet = dict(type='bool', default='no'),
+ tdf = dict(type='bool'),
+ template = dict(type='bool', default='no'),
+ timeout = dict(type='int', default=30),
+ validate_certs = dict(type='bool', default='no'),
+ vcpus = dict(type='int', default=None),
+ vga = dict(default='std', choices=['std', 'cirrus', 'vmware', 'qxl', 'serial0', 'serial1', 'serial2', 'serial3', 'qxl2', 'qxl3', 'qxl4']),
+ virtio = dict(type='dict', default=None),
+ vmid = dict(type='int', default=None),
+ watchdog = dict(),
+ )
+ )
+
+ if not HAS_PROXMOXER:
+ module.fail_json(msg='proxmoxer required for this module')
+
+ api_user = module.params['api_user']
+ api_host = module.params['api_host']
+ api_password = module.params['api_password']
+ cpu = module.params['cpu']
+ cores = module.params['cores']
+ memory = module.params['memory']
+ name = module.params['name']
+ node = module.params['node']
+ sockets = module.params['sockets'],
+ state = module.params['state']
+ timeout = module.params['timeout']
+ validate_certs = module.params['validate_certs']
+
+ # If password not set get it from PROXMOX_PASSWORD env
+ if not api_password:
+ try:
+ api_password = os.environ['PROXMOX_PASSWORD']
+ except KeyError as e:
+ module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
+
+ try:
+ proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
+ global VZ_TYPE
+ global PVE_MAJOR_VERSION
+ PVE_MAJOR_VERSION = 3 if float(proxmox.version.get()['version']) < 4.0 else 4
+ except Exception as e:
+ module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
+
+
+ # If vmid not set get the Next VM id from ProxmoxAPI
+ # If vm name is set get the VM id from ProxmoxAPI
+ if module.params['vmid'] is not None:
+ vmid = module.params['vmid']
+ elif state == 'present':
+ vmid = get_nextvmid(proxmox)
+ elif module.params['name'] is not None:
+ vmid = get_vmid(proxmox, name)[0]
+
+ if state == 'present':
+ try:
+ if get_vm(proxmox, vmid) and not module.params['force']:
+ module.exit_json(changed=False, msg="VM with vmid <%s> already exists" % vmid)
+ elif get_vmid(proxmox, name) and not module.params['force']:
+ module.exit_json(changed=False, msg="VM with name <%s> already exists" % name)
+ elif not (node, module.params['name']):
+ module.fail_json(msg='node, name is mandatory for creating vm')
+ elif not node_check(proxmox, node):
+ module.fail_json(msg="node '%s' does not exist in cluster" % node)
+
+ create_vm(module, proxmox, vmid, node, name, memory, cpu, cores, sockets, timeout,
+ acpi = module.params['acpi'],
+ agent = module.params['agent'],
+ autostart = module.params['autostart'],
+ balloon = module.params['balloon'],
+ bios = module.params['bios'],
+ boot = module.params['boot'],
+ bootdisk = module.params['bootdisk'],
+ cpulimit = module.params['cpulimit'],
+ cpuunits = module.params['cpuunits'],
+ delete = module.params['delete'],
+ description = module.params['description'],
+ digest = module.params['digest'],
+ force = module.params['force'],
+ freeze = module.params['freeze'],
+ hostpci = module.params['hostpci'],
+ hotplug = module.params['hotplug'],
+ hugepages = module.params['hugepages'],
+ ide = module.params['ide'],
+ keyboard = module.params['keyboard'],
+ kvm = module.params['kvm'],
+ localtime = module.params['localtime'],
+ lock = module.params['lock'],
+ machine = module.params['machine'],
+ migrate_downtime = module.params['migrate_downtime'],
+ migrate_speed = module.params['migrate_speed'],
+ net = module.params['net'],
+ numa = module.params['numa'],
+ onboot = module.params['onboot'],
+ ostype = module.params['ostype'],
+ parallel = module.params['parallel'],
+ protection = module.params['protection'],
+ reboot = module.params['reboot'],
+ revert = module.params['revert'],
+ sata = module.params['sata'],
+ scsi = module.params['scsi'],
+ scsihw = module.params['scsihw'],
+ serial = module.params['serial'],
+ shares = module.params['shares'],
+ skiplock = module.params['skiplock'],
+ smbios1 = module.params['smbios'],
+ startdate = module.params['startdate'],
+ startup = module.params['startup'],
+ tablet = module.params['tablet'],
+ tdf = module.params['tdf'],
+ template = module.params['template'],
+ vcpus = module.params['vcpus'],
+ vga = module.params['vga'],
+ virtio = module.params['virtio'],
+ watchdog = module.params['watchdog'])
+
+ get_vminfo(module, proxmox, node, vmid,
+ ide = module.params['ide'],
+ net = module.params['net'],
+ sata = module.params['sata'],
+ scsi = module.params['scsi'],
+ virtio = module.params['virtio'])
+ module.exit_json(changed=True, msg="VM %s with vmid %s deployed" % (name, vmid), **results)
+ except Exception as e:
+ module.fail_json(msg="creation of %s VM %s with vmid %s failed with exception: %s" % ( VZ_TYPE, name, vmid, e ))
+
+ elif state == 'started':
+ try:
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid <%s> does not exist in cluster' % vmid)
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is already running" % vmid)
+
+ if start_vm(module, proxmox, vm, vmid, timeout):
+ module.exit_json(changed=True, msg="VM %s started" % vmid)
+ except Exception as e:
+ module.fail_json(msg="starting of VM %s failed with exception: %s" % ( vmid, e ))
+
+ elif state == 'stopped':
+ try:
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
+ module.exit_json(changed=False, msg="VM %s is already stopped" % vmid)
+
+ if stop_vm(module, proxmox, vm, vmid, timeout, force = module.params['force']):
+ module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
+ except Exception as e:
+ module.fail_json(msg="stopping of VM %s failed with exception: %s" % ( vmid, e ))
+
+ elif state == 'restarted':
+ try:
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
+ module.exit_json(changed=False, msg="VM %s is not running" % vmid)
+
+ if ( stop_vm(module, proxmox, vm, vmid, timeout, force = module.params['force']) and
+ start_vm(module, proxmox, vm, vmid, timeout) ):
+ module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
+ except Exception as e:
+ module.fail_json(msg="restarting of VM %s failed with exception: %s" % ( vmid, e ))
+
+ elif state == 'absent':
+ try:
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
+
+ if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
+ module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
+
+ taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
+ while timeout:
+ if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
+ and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
+ module.exit_json(changed=True, msg="VM %s removed" % vmid)
+ timeout = timeout - 1
+ if timeout == 0:
+ module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
+ % proxmox_node.tasks(taskid).log.get()[:1])
+
+ time.sleep(1)
+ except Exception as e:
+ module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e ))
+
+ elif state == 'current':
+ status = {}
+ try:
+ vm = get_vm(proxmox, vmid)
+ if not vm:
+ module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
+ current = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status']
+ status['status'] = current
+ if status:
+ module.exit_json(changed=False, msg="VM %s with vmid = %s is %s" % (name, vmid, current), **status)
+ except Exception as e:
+ module.fail_json(msg="Unable to get vm {} with vmid = {} status: ".format(name, vmid) + str(e))
+
+# import module snippets
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/misc/proxmox_template.py b/lib/ansible/modules/cloud/misc/proxmox_template.py
new file mode 100644
index 0000000000..64c9b96cb6
--- /dev/null
+++ b/lib/ansible/modules/cloud/misc/proxmox_template.py
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: proxmox_template
+short_description: management of OS templates in Proxmox VE cluster
+description:
+ - allows you to upload/delete templates in Proxmox VE cluster
+version_added: "2.0"
+options:
+ api_host:
+ description:
+ - the host of the Proxmox VE cluster
+ required: true
+ api_user:
+ description:
+ - the user to authenticate with
+ required: true
+ api_password:
+ description:
+ - the password to authenticate with
+ - you can use PROXMOX_PASSWORD environment variable
+ default: null
+ required: false
+ validate_certs:
+ description:
+ - enable / disable https certificate verification
+ default: false
+ required: false
+ type: boolean
+ node:
+ description:
+      - Proxmox VE node on which to operate with the template
+ default: null
+ required: true
+ src:
+ description:
+ - path to uploaded file
+ - required only for C(state=present)
+ default: null
+ required: false
+ aliases: ['path']
+ template:
+ description:
+ - the template name
+ - required only for states C(absent), C(info)
+ default: null
+ required: false
+ content_type:
+ description:
+ - content type
+ - required only for C(state=present)
+ default: 'vztmpl'
+ required: false
+ choices: ['vztmpl', 'iso']
+ storage:
+ description:
+ - target storage
+ default: 'local'
+ required: false
+ type: string
+ timeout:
+ description:
+ - timeout for operations
+ default: 30
+ required: false
+ type: integer
+ force:
+ description:
+      - can be used only with C(state=present); an existing template will be overwritten
+ default: false
+ required: false
+ type: boolean
+ state:
+ description:
+ - Indicate desired state of the template
+ choices: ['present', 'absent']
+ default: present
+notes:
+  - Requires proxmoxer and requests modules on host. These modules can be installed with pip.
+requirements: [ "proxmoxer", "requests" ]
+author: "Sergei Antipov @UnderGreen"
+'''
+
+EXAMPLES = '''
+# Upload new openvz template with minimal options
+- proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+
+# Upload new openvz template with minimal options use environment PROXMOX_PASSWORD variable(you should export it before)
+- proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_host: node1
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+
+# Upload new openvz template with all options and force overwrite
+- proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ storage: local
+ content_type: vztmpl
+ src: ~/ubuntu-14.04-x86_64.tar.gz
+ force: yes
+
+# Delete template with minimal options
+- proxmox_template:
+ node: uk-mc02
+ api_user: root@pam
+ api_password: 1q2w3e
+ api_host: node1
+ template: ubuntu-14.04-x86_64.tar.gz
+ state: absent
+'''
+
+import os
+import time
+
+try:
+ from proxmoxer import ProxmoxAPI
+ HAS_PROXMOXER = True
+except ImportError:
+ HAS_PROXMOXER = False
+
def get_template(proxmox, node, storage, content_type, template):
    """Return a non-empty list when the given template volume exists.

    Builds the Proxmox volume id ``storage:content_type/template`` and
    scans the storage content listing on *node* for a matching ``volid``.
    The returned list is truthy iff at least one entry matches, which is
    how callers use it (as an existence test).
    """
    volid = '%s:%s/%s' % (storage, content_type, template)
    matches = []
    for entry in proxmox.nodes(node).storage(storage).content.get():
        if entry['volid'] == volid:
            matches.append(True)
    return matches
+
def upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout):
    """Upload a template file and poll the resulting task until it finishes.

    Returns True when the upload task reports stopped/OK within *timeout*
    seconds; calls module.fail_json (which exits) on timeout.

    Fixes over the original: the file handle passed to the upload was
    never closed, and the timeout error path called the non-existent
    ``proxmox.node`` attribute (the proxmoxer API object exposes ``nodes``).
    """
    src = open(realpath)
    try:
        taskid = proxmox.nodes(node).storage(storage).upload.post(content=content_type, filename=src)
        while timeout:
            task_status = proxmox.nodes(api_host.split('.')[0]).tasks(taskid).status.get()
            if task_status['status'] == 'stopped' and task_status['exitstatus'] == 'OK':
                return True
            timeout = timeout - 1
            if timeout == 0:
                module.fail_json(msg='Reached timeout while waiting for uploading template. Last line in task before timeout: %s'
                                 % proxmox.nodes(node).tasks(taskid).log.get()[:1])

            time.sleep(1)
        return False
    finally:
        src.close()
+
def delete_template(module, proxmox, node, storage, content_type, template, timeout):
    """Delete a template volume and wait until it disappears from storage.

    Issues the content delete call for ``storage:content_type/template``,
    then polls get_template() once per second for up to *timeout* seconds.
    Returns True once the volume is gone; calls module.fail_json (which
    exits) when the timeout is reached.
    """
    volid = '%s:%s/%s' % (storage, content_type, template)
    proxmox.nodes(node).storage(storage).content.delete(volid)
    remaining = timeout
    while remaining:
        if not get_template(proxmox, node, storage, content_type, template):
            return True
        remaining -= 1
        if remaining == 0:
            module.fail_json(msg='Reached timeout while waiting for deleting template.')

        time.sleep(1)
    return False
+
def main():
    """Ansible entry point: upload or delete a Proxmox VE template/ISO.

    state=present uploads the local file given by ``src`` to ``storage``
    on ``node``; state=absent removes the volume named by ``template``.
    All outcomes are reported through module.exit_json/fail_json.
    """
    module = AnsibleModule(
        argument_spec = dict(
            api_host = dict(required=True),
            api_user = dict(required=True),
            api_password = dict(no_log=True),
            validate_certs = dict(type='bool', default='no'),
            node = dict(),
            src = dict(),
            template = dict(),
            content_type = dict(default='vztmpl', choices=['vztmpl','iso']),
            storage = dict(default='local'),
            timeout = dict(type='int', default=30),
            force = dict(type='bool', default='no'),
            state = dict(default='present', choices=['present', 'absent']),
        )
    )

    if not HAS_PROXMOXER:
        module.fail_json(msg='proxmoxer required for this module')

    state = module.params['state']
    api_user = module.params['api_user']
    api_host = module.params['api_host']
    api_password = module.params['api_password']
    validate_certs = module.params['validate_certs']
    node = module.params['node']
    storage = module.params['storage']
    timeout = module.params['timeout']

    # If password not set get it from PROXMOX_PASSWORD env
    if not api_password:
        try:
            api_password = os.environ['PROXMOX_PASSWORD']
        except KeyError:
            module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')

    try:
        proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
    except Exception as e:
        module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)

    if state == 'present':
        try:
            content_type = module.params['content_type']
            src = module.params['src']

            # Validate src BEFORE deriving paths from it: the original code
            # computed realpath first, so a missing src raised a TypeError
            # instead of reaching the explicit fail_json below.
            if not src:
                module.fail_json(msg='src param to uploading template file is mandatory')

            from ansible import utils
            realpath = utils.path_dwim(None, src)
            template = os.path.basename(realpath)
            if get_template(proxmox, node, storage, content_type, template) and not module.params['force']:
                module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already exists' % (storage, content_type, template))
            elif not (os.path.exists(realpath) and os.path.isfile(realpath)):
                module.fail_json(msg='template file on path %s not exists' % realpath)

            if upload_template(module, proxmox, api_host, node, storage, content_type, realpath, timeout):
                module.exit_json(changed=True, msg='template with volid=%s:%s/%s uploaded' % (storage, content_type, template))
        except Exception as e:
            module.fail_json(msg="uploading of template %s failed with exception: %s" % ( template, e ))

    elif state == 'absent':
        try:
            content_type = module.params['content_type']
            template = module.params['template']

            if not template:
                module.fail_json(msg='template param is mandatory')
            elif not get_template(proxmox, node, storage, content_type, template):
                module.exit_json(changed=False, msg='template with volid=%s:%s/%s is already deleted' % (storage, content_type, template))

            if delete_template(module, proxmox, node, storage, content_type, template, timeout):
                module.exit_json(changed=True, msg='template with volid=%s:%s/%s deleted' % (storage, content_type, template))
        except Exception as e:
            module.fail_json(msg="deleting of template %s failed with exception: %s" % ( template, e ))
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/misc/rhevm.py b/lib/ansible/modules/cloud/misc/rhevm.py
new file mode 100644
index 0000000000..8789e88028
--- /dev/null
+++ b/lib/ansible/modules/cloud/misc/rhevm.py
@@ -0,0 +1,1534 @@
+#!/usr/bin/python
+
+# (c) 2016, Timothy Vandenbrande <timothy.vandenbrande@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: rhevm
+author: Timothy Vandenbrande
+short_description: RHEV/oVirt automation
+description:
+ - Allows you to create/remove/update or powermanage virtual machines on a RHEV/oVirt platform.
+version_added: "2.2"
+requirements:
+ - ovirtsdk
+options:
+ user:
+ description:
+ - The user to authenticate with.
+ default: "admin@internal"
+ required: false
+ server:
+ description:
+ - The name/ip of your RHEV-m/oVirt instance.
+ default: "127.0.0.1"
+ required: false
+ port:
+ description:
+            - The port on which the API is reachable.
+ default: "443"
+ required: false
+ insecure_api:
+ description:
+ - A boolean switch to make a secure or insecure connection to the server.
+ default: false
+ required: false
+ name:
+ description:
+ - The name of the VM.
+ cluster:
+ description:
+            - The rhev/ovirt cluster in which you want your VM to start.
+ required: false
+ datacenter:
+ description:
+            - The rhev/ovirt datacenter in which you want your VM to start.
+ required: false
+ default: "Default"
+ state:
+ description:
+ - This serves to create/remove/update or powermanage your VM.
+ default: "present"
+ required: false
+ choices: ['ping', 'present', 'absent', 'up', 'down', 'restarted', 'cd', 'info']
+ image:
+ description:
+ - The template to use for the VM.
+ default: null
+ required: false
+ type:
+ description:
+ - To define if the VM is a server or desktop.
+ default: server
+ required: false
+ choices: [ 'server', 'desktop', 'host' ]
+ vmhost:
+ description:
+ - The host you wish your VM to run on.
+ required: false
+ vmcpu:
+ description:
+ - The number of CPUs you want in your VM.
+ default: "2"
+ required: false
+ cpu_share:
+ description:
+ - This parameter is used to configure the cpu share.
+ default: "0"
+ required: false
+ vmmem:
+ description:
+ - The amount of memory you want your VM to use (in GB).
+ default: "1"
+ required: false
+ osver:
+ description:
+            - The operating system option in RHEV/oVirt.
+ default: "rhel_6x64"
+ required: false
+ mempol:
+ description:
+ - The minimum amount of memory you wish to reserve for this system.
+ default: "1"
+ required: false
+ vm_ha:
+ description:
+ - To make your VM High Available.
+ default: true
+ required: false
+ disks:
+ description:
+ - This option uses complex arguments and is a list of disks with the options name, size and domain.
+ required: false
+ ifaces:
+ description:
+ - This option uses complex arguments and is a list of interfaces with the options name and vlan.
+ aliases: ['nics', 'interfaces']
+ required: false
+ boot_order:
+ description:
+ - This option uses complex arguments and is a list of items that specify the bootorder.
+ default: ["network","hd"]
+ required: false
+ del_prot:
+ description:
+ - This option sets the delete protection checkbox.
+ default: true
+ required: false
+ cd_drive:
+ description:
+ - The CD you wish to have mounted on the VM when I(state = 'CD').
+ default: null
+ required: false
+ timeout:
+ description:
+ - The timeout you wish to define for power actions.
+ - When I(state = 'up')
+ - When I(state = 'down')
+ - When I(state = 'restarted')
+ default: null
+ required: false
+'''
+
+RETURN = '''
+vm:
+ description: Returns all of the VMs variables and execution.
+ returned: always
+ type: dict
+ sample: '{
+ "boot_order": [
+ "hd",
+ "network"
+ ],
+ "changed": true,
+ "changes": [
+ "Delete Protection"
+ ],
+ "cluster": "C1",
+ "cpu_share": "0",
+ "created": false,
+ "datacenter": "Default",
+ "del_prot": true,
+ "disks": [
+ {
+ "domain": "ssd-san",
+ "name": "OS",
+ "size": 40
+ }
+ ],
+ "eth0": "00:00:5E:00:53:00",
+ "eth1": "00:00:5E:00:53:01",
+ "eth2": "00:00:5E:00:53:02",
+ "exists": true,
+ "failed": false,
+ "ifaces": [
+ {
+ "name": "eth0",
+ "vlan": "Management"
+ },
+ {
+ "name": "eth1",
+ "vlan": "Internal"
+ },
+ {
+ "name": "eth2",
+ "vlan": "External"
+ }
+ ],
+ "image": false,
+ "mempol": "0",
+ "msg": [
+ "VM exists",
+ "cpu_share was already set to 0",
+ "VM high availability was already set to True",
+ "The boot order has already been set",
+ "VM delete protection has been set to True",
+ "Disk web2_Disk0_OS already exists",
+ "The VM starting host was already set to host416"
+ ],
+ "name": "web2",
+ "type": "server",
+ "uuid": "4ba5a1be-e60b-4368-9533-920f156c817b",
+ "vm_ha": true,
+ "vmcpu": "4",
+ "vmhost": "host416",
+ "vmmem": "16"
+ }'
+'''
+
+EXAMPLES = '''
+# basic get info from VM
+ action: rhevm
+ args:
+ name: "demo"
+ user: "{{ rhev.admin.name }}"
+ password: "{{ rhev.admin.pass }}"
+ server: "rhevm01"
+ state: "info"
+
+# basic create example from image
+ action: rhevm
+ args:
+ name: "demo"
+ user: "{{ rhev.admin.name }}"
+ password: "{{ rhev.admin.pass }}"
+ server: "rhevm01"
+ state: "present"
+ image: "centos7_x64"
+ cluster: "centos"
+
+# power management
+ action: rhevm
+ args:
+ name: "uptime_server"
+ user: "{{ rhev.admin.name }}"
+ password: "{{ rhev.admin.pass }}"
+ server: "rhevm01"
+ cluster: "RH"
+ state: "down"
+ image: "centos7_x64"
+        cluster: "centos"
+
+# multi disk, multi nic create example
+ action: rhevm
+ args:
+ name: "server007"
+ user: "{{ rhev.admin.name }}"
+ password: "{{ rhev.admin.pass }}"
+ server: "rhevm01"
+ cluster: "RH"
+ state: "present"
+ type: "server"
+ vmcpu: 4
+ vmmem: 2
+ ifaces:
+ - name: "eth0"
+ vlan: "vlan2202"
+ - name: "eth1"
+ vlan: "vlan36"
+ - name: "eth2"
+ vlan: "vlan38"
+ - name: "eth3"
+ vlan: "vlan2202"
+ disks:
+ - name: "root"
+ size: 10
+ domain: "ssd-san"
+ - name: "swap"
+ size: 10
+ domain: "15kiscsi-san"
+ - name: "opt"
+ size: 10
+ domain: "15kiscsi-san"
+ - name: "var"
+ size: 10
+ domain: "10kiscsi-san"
+ - name: "home"
+ size: 10
+ domain: "sata-san"
+ boot_order:
+ - "network"
+ - "hd"
+
+# add a CD to the disk cd_drive
+ action: rhevm
+ args:
+ name: 'server007'
+ user: "{{ rhev.admin.name }}"
+ password: "{{ rhev.admin.pass }}"
+ state: 'cd'
+ cd_drive: 'rhev-tools-setup.iso'
+
+# new host deployment + host network configuration
+ action: rhevm
+ args:
+ name: "ovirt_node007"
+ password: "{{ rhevm.admin.pass }}"
+ type: "host"
+ state: present
+ cluster: "rhevm01"
+ ifaces:
+ - name: em1
+ - name: em2
+ - name: p3p1
+ ip: '172.31.224.200'
+ netmask: '255.255.254.0'
+ - name: p3p2
+ ip: '172.31.225.200'
+ netmask: '255.255.254.0'
+ - name: bond0
+ bond:
+ - em1
+ - em2
+ network: 'rhevm'
+ ip: '172.31.222.200'
+ netmask: '255.255.255.0'
+ management: True
+ - name: bond0.36
+ network: 'vlan36'
+ ip: '10.2.36.200'
+ netmask: '255.255.254.0'
+ gateway: '10.2.36.254'
+ - name: bond0.2202
+ network: 'vlan2202'
+ - name: bond0.38
+ network: 'vlan38'
+'''
+
+import time
+import sys
+import traceback
+import json
+
+try:
+ from ovirtsdk.api import API
+ from ovirtsdk.xml import params
+ HAS_SDK = True
+except ImportError:
+ HAS_SDK = False
+
# Exit codes returned to Ansible by the standalone error paths below.
RHEV_FAILED = 1
RHEV_SUCCESS = 0
RHEV_UNAVAILABLE = 2

# Valid values for the `type` parameter.
RHEV_TYPE_OPTS = ['server', 'desktop', 'host']
# NOTE(review): DOCUMENTATION lists 'restarted' among the state choices but
# this list contains 'restart' — confirm which spelling the module accepts.
STATE_OPTS = ['ping', 'present', 'absent', 'up', 'down', 'restart', 'cd', 'info']

# Module-level result accumulators shared by the setMsg/setChanged/setFailed
# helpers.  NOTE(review): a `global` statement at module scope is a no-op;
# it is kept here only for byte-compatibility.
global msg, changed, failed
msg = []
changed = False
failed = False
+
+
+class RHEVConn(object):
+ 'Connection to RHEV-M'
+ def __init__(self, module):
+ self.module = module
+
+ user = module.params.get('user')
+ password = module.params.get('password')
+ server = module.params.get('server')
+ port = module.params.get('port')
+ insecure_api = module.params.get('insecure_api')
+
+ url = "https://%s:%s" % (server, port)
+
+ try:
+ api = API(url=url, username=user, password=password, insecure=str(insecure_api))
+ api.test()
+ self.conn = api
+ except:
+ raise Exception("Failed to connect to RHEV-M.")
+
+ def __del__(self):
+ self.conn.disconnect()
+
+ def createVMimage(self, name, cluster, template):
+ try:
+ vmparams = params.VM(
+ name=name,
+ cluster=self.conn.clusters.get(name=cluster),
+ template=self.conn.templates.get(name=template),
+ disks=params.Disks(clone=True)
+ )
+ self.conn.vms.add(vmparams)
+ setMsg("VM is created")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to create VM")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def createVM(self, name, cluster, os, actiontype):
+ try:
+ vmparams = params.VM(
+ name=name,
+ cluster=self.conn.clusters.get(name=cluster),
+ os=params.OperatingSystem(type_=os),
+ template=self.conn.templates.get(name="Blank"),
+ type_=actiontype
+ )
+ self.conn.vms.add(vmparams)
+ setMsg("VM is created")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to create VM")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def createDisk(self, vmname, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot):
+ VM = self.get_VM(vmname)
+
+ newdisk = params.Disk(
+ name=diskname,
+ size=1024 * 1024 * 1024 * int(disksize),
+ wipe_after_delete=True,
+ sparse=diskallocationtype,
+ interface=diskinterface,
+ format=diskformat,
+ bootable=diskboot,
+ storage_domains=params.StorageDomains(
+ storage_domain=[self.get_domain(diskdomain)]
+ )
+ )
+
+ try:
+ VM.disks.add(newdisk)
+ VM.update()
+ setMsg("Successfully added disk " + diskname)
+ setChanged()
+ except Exception as e:
+ setFailed()
+ setMsg("Error attaching " + diskname + "disk, please recheck and remove any leftover configuration.")
+ setMsg(str(e))
+ return False
+
+ try:
+ currentdisk = VM.disks.get(name=diskname)
+ attempt = 1
+ while currentdisk.status.state != 'ok':
+ currentdisk = VM.disks.get(name=diskname)
+ if attempt == 100:
+ setMsg("Error, disk %s, state %s" % (diskname, str(currentdisk.status.state)))
+ raise
+ else:
+ attempt += 1
+ time.sleep(2)
+ setMsg("The disk " + diskname + " is ready.")
+ except Exception as e:
+ setFailed()
+ setMsg("Error getting the state of " + diskname + ".")
+ setMsg(str(e))
+ return False
+ return True
+
+ def createNIC(self, vmname, nicname, vlan, interface):
+ VM = self.get_VM(vmname)
+ CLUSTER = self.get_cluster_byid(VM.cluster.id)
+ DC = self.get_DC_byid(CLUSTER.data_center.id)
+ newnic = params.NIC(
+ name=nicname,
+ network=DC.networks.get(name=vlan),
+ interface=interface
+ )
+
+ try:
+ VM.nics.add(newnic)
+ VM.update()
+ setMsg("Successfully added iface " + nicname)
+ setChanged()
+ except Exception as e:
+ setFailed()
+ setMsg("Error attaching " + nicname + " iface, please recheck and remove any leftover configuration.")
+ setMsg(str(e))
+ return False
+
+ try:
+ currentnic = VM.nics.get(name=nicname)
+ attempt = 1
+ while currentnic.active is not True:
+ currentnic = VM.nics.get(name=nicname)
+ if attempt == 100:
+ setMsg("Error, iface %s, state %s" % (nicname, str(currentnic.active)))
+ raise
+ else:
+ attempt += 1
+ time.sleep(2)
+ setMsg("The iface " + nicname + " is ready.")
+ except Exception as e:
+ setFailed()
+ setMsg("Error getting the state of " + nicname + ".")
+ setMsg(str(e))
+ return False
+ return True
+
    # ---- thin lookup helpers around the ovirtsdk collections ----

    def get_DC(self, dc_name):
        """Look up a datacenter by name."""
        return self.conn.datacenters.get(name=dc_name)

    def get_DC_byid(self, dc_id):
        """Look up a datacenter by id."""
        return self.conn.datacenters.get(id=dc_id)

    def get_VM(self, vm_name):
        """Look up a VM by name."""
        return self.conn.vms.get(name=vm_name)

    def get_cluster_byid(self, cluster_id):
        """Look up a cluster by id."""
        return self.conn.clusters.get(id=cluster_id)

    def get_cluster(self, cluster_name):
        """Look up a cluster by name."""
        return self.conn.clusters.get(name=cluster_name)

    def get_domain_byid(self, dom_id):
        """Look up a storage domain by id."""
        return self.conn.storagedomains.get(id=dom_id)

    def get_domain(self, domain_name):
        """Look up a storage domain by name."""
        return self.conn.storagedomains.get(name=domain_name)

    def get_disk(self, disk):
        """Look up a disk (positional key, per ovirtsdk's collection get)."""
        return self.conn.disks.get(disk)

    def get_network(self, dc_name, network_name):
        """Look up a network by name within the named datacenter."""
        return self.get_DC(dc_name).networks.get(network_name)

    def get_network_byid(self, network_id):
        """Look up a network by id (cluster-wide collection)."""
        return self.conn.networks.get(id=network_id)

    def get_NIC(self, vm_name, nic_name):
        """Look up a NIC by name on the named VM."""
        return self.get_VM(vm_name).nics.get(nic_name)

    def get_Host(self, host_name):
        """Look up a host by name."""
        return self.conn.hosts.get(name=host_name)

    def get_Host_byid(self, host_id):
        """Look up a host by id."""
        return self.conn.hosts.get(id=host_id)
+
+ def set_Memory(self, name, memory):
+ VM = self.get_VM(name)
+ VM.memory = int(int(memory) * 1024 * 1024 * 1024)
+ try:
+ VM.update()
+ setMsg("The Memory has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update memory.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_Memory_Policy(self, name, memory_policy):
+ VM = self.get_VM(name)
+ VM.memory_policy.guaranteed = int(int(memory_policy) * 1024 * 1024 * 1024)
+ try:
+ VM.update()
+ setMsg("The memory policy has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update memory policy.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_CPU(self, name, cpu):
+ VM = self.get_VM(name)
+ VM.cpu.topology.cores = int(cpu)
+ try:
+ VM.update()
+ setMsg("The number of CPUs has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update the number of CPUs.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_CPU_share(self, name, cpu_share):
+ VM = self.get_VM(name)
+ VM.cpu_shares = int(cpu_share)
+ try:
+ VM.update()
+ setMsg("The CPU share has been updated.")
+ setChanged()
+ return True
+ except Exception as e:
+ setMsg("Failed to update the CPU share.")
+ setMsg(str(e))
+ setFailed()
+ return False
+
+ def set_Disk(self, diskname, disksize, diskinterface, diskboot):
+ DISK = self.get_disk(diskname)
+ setMsg("Checking disk " + diskname)
+ if DISK.get_bootable() != diskboot:
+ try:
+ DISK.set_bootable(diskboot)
+ setMsg("Updated the boot option on the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to set the boot option on the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ else:
+ setMsg("The boot option of the disk is correct")
+ if int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)):
+ try:
+ DISK.size = (1024 * 1024 * 1024 * int(disksize))
+ setMsg("Updated the size of the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the size of the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ elif int(DISK.size) < (1024 * 1024 * 1024 * int(disksize)):
+ setMsg("Shrinking disks is not supported")
+ setMsg(str(e))
+ setFailed()
+ return False
+ else:
+ setMsg("The size of the disk is correct")
+ if str(DISK.interface) != str(diskinterface):
+ try:
+ DISK.interface = diskinterface
+ setMsg("Updated the interface of the disk.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the interface of the disk.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ else:
+ setMsg("The interface of the disk is correct")
+ return True
+
    def set_NIC(self, vmname, nicname, newname, vlan, interface):
        """Reconcile an existing NIC's name, network (vlan) and interface type.

        Looks the NIC up by *nicname*, resolves the target network through
        the VM's cluster -> datacenter chain, mutates only the attributes
        that differ, then pushes a single update.  Returns True on success,
        False (with setFailed) when the update call fails.
        """
        NIC = self.get_NIC(vmname, nicname)
        VM = self.get_VM(vmname)
        CLUSTER = self.get_cluster_byid(VM.cluster.id)
        DC = self.get_DC_byid(CLUSTER.data_center.id)
        # Networks are scoped to the datacenter, so resolve the vlan name there.
        NETWORK = self.get_network(str(DC.name), vlan)
        # Abort (module-level helper) if a previous step already flagged failure.
        checkFail()
        if NIC.name != newname:
            NIC.name = newname
            setMsg('Updating iface name to ' + newname)
            setChanged()
        if str(NIC.network.id) != str(NETWORK.id):
            NIC.set_network(NETWORK)
            setMsg('Updating iface network to ' + vlan)
            setChanged()
        if NIC.interface != interface:
            NIC.interface = interface
            setMsg('Updating iface interface to ' + interface)
            setChanged()
        try:
            NIC.update()
            setMsg('iface has succesfully been updated.')
        except Exception as e:
            setMsg("Failed to update the iface.")
            setMsg(str(e))
            setFailed()
            return False
        return True
+
+ def set_DeleteProtection(self, vmname, del_prot):
+ VM = self.get_VM(vmname)
+ VM.delete_protected = del_prot
+ try:
+ VM.update()
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update delete protection.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_BootOrder(self, vmname, boot_order):
+ VM = self.get_VM(vmname)
+ bootorder = []
+ for device in boot_order:
+ bootorder.append(params.Boot(dev=device))
+ VM.os.boot = bootorder
+
+ try:
+ VM.update()
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to update the boot order.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+    def set_Host(self, host_name, cluster, ifaces):
+        # Register a hypervisor host in RHEV-M with the given interface
+        # layout: create the host, wait for install, move it to maintenance,
+        # apply and commit the network config, then activate it.
+        # Idempotent: an already-registered host is left untouched.
+        HOST = self.get_Host(host_name)
+        CLUSTER = self.get_cluster(cluster)
+
+        if HOST is None:
+            setMsg("Host does not exist.")
+            ifacelist = dict()
+            networklist = []
+            manageip = ''
+
+            try:
+                for iface in ifaces:
+                    try:
+                        setMsg('creating host interface ' + iface['name'])
+                        if 'management' in iface:
+                            manageip = iface['ip']
+                        # Default boot protocol: static when an IP is given,
+                        # otherwise none; missing ip/netmask/gateway become ''.
+                        if 'boot_protocol' not in iface:
+                            if 'ip' in iface:
+                                iface['boot_protocol'] = 'static'
+                            else:
+                                iface['boot_protocol'] = 'none'
+                        if 'ip' not in iface:
+                            iface['ip'] = ''
+                        if 'netmask' not in iface:
+                            iface['netmask'] = ''
+                        if 'gateway' not in iface:
+                            iface['gateway'] = ''
+
+                        if 'network' in iface:
+                            if 'bond' in iface:
+                                # Slaves must have been declared earlier in
+                                # `ifaces`; they are looked up from ifacelist.
+                                bond = []
+                                for slave in iface['bond']:
+                                    bond.append(ifacelist[slave])
+                                try:
+                                    # Fixed bonding options: 802.3ad (mode 4)
+                                    # with a 100ms miimon link monitor.
+                                    tmpiface = params.Bonding(
+                                        slaves = params.Slaves(host_nic = bond),
+                                        options = params.Options(
+                                            option = [
+                                                params.Option(name = 'miimon', value = '100'),
+                                                params.Option(name = 'mode', value = '4')
+                                            ]
+                                        )
+                                    )
+                                except Exception as e:
+                                    setMsg('Failed to create the bond for ' + iface['name'])
+                                    setFailed()
+                                    setMsg(str(e))
+                                    return False
+                                try:
+                                    tmpnetwork = params.HostNIC(
+                                        network = params.Network(name = iface['network']),
+                                        name = iface['name'],
+                                        boot_protocol = iface['boot_protocol'],
+                                        ip = params.IP(
+                                            address = iface['ip'],
+                                            netmask = iface['netmask'],
+                                            gateway = iface['gateway']
+                                        ),
+                                        override_configuration = True,
+                                        bonding = tmpiface)
+                                    networklist.append(tmpnetwork)
+                                    setMsg('Applying network ' + iface['name'])
+                                except Exception as e:
+                                    setMsg('Failed to set' + iface['name'] + ' as network interface')
+                                    setFailed()
+                                    setMsg(str(e))
+                                    return False
+                            else:
+                                tmpnetwork = params.HostNIC(
+                                    network = params.Network(name = iface['network']),
+                                    name = iface['name'],
+                                    boot_protocol = iface['boot_protocol'],
+                                    ip = params.IP(
+                                        address = iface['ip'],
+                                        netmask = iface['netmask'],
+                                        gateway = iface['gateway']
+                                    ))
+                                networklist.append(tmpnetwork)
+                                setMsg('Applying network ' + iface['name'])
+                        else:
+                            # Plain NIC (no logical network attached yet);
+                            # kept in ifacelist so it can serve as a bond slave.
+                            tmpiface = params.HostNIC(
+                                name=iface['name'],
+                                network=params.Network(),
+                                boot_protocol=iface['boot_protocol'],
+                                ip=params.IP(
+                                    address=iface['ip'],
+                                    netmask=iface['netmask'],
+                                    gateway=iface['gateway']
+                                ))
+                        ifacelist[iface['name']] = tmpiface
+                    except Exception as e:
+                        setMsg('Failed to set ' + iface['name'])
+                        setFailed()
+                        setMsg(str(e))
+                        return False
+            except Exception as e:
+                setMsg('Failed to set networks')
+                setMsg(str(e))
+                setFailed()
+                return False
+
+            if manageip == '':
+                setMsg('No management network is defined')
+                setFailed()
+                return False
+
+            try:
+                HOST = params.Host(name=host_name, address=manageip, cluster=CLUSTER, ssh=params.SSH(authentication_method='publickey'))
+                if self.conn.hosts.add(HOST):
+                    setChanged()
+                    HOST = self.get_Host(host_name)
+                    state = HOST.status.state
+                    # Poll until the install settles in 'up' or
+                    # 'non_operational'.
+                    while (state != 'non_operational' and state != 'up'):
+                        HOST = self.get_Host(host_name)
+                        state = HOST.status.state
+                        time.sleep(1)
+                        if state == 'non_responsive':
+                            setMsg('Failed to add host to RHEVM')
+                            setFailed()
+                            return False
+
+                    setMsg('status host: up')
+                    time.sleep(5)
+
+                    HOST = self.get_Host(host_name)
+                    state = HOST.status.state
+                    setMsg('State before setting to maintenance: ' + str(state))
+                    # Network setup requires the host to be in maintenance.
+                    HOST.deactivate()
+                    while state != 'maintenance':
+                        HOST = self.get_Host(host_name)
+                        state = HOST.status.state
+                        time.sleep(1)
+                    setMsg('status host: maintenance')
+
+                    try:
+                        HOST.nics.setupnetworks(params.Action(
+                            force=True,
+                            check_connectivity = False,
+                            host_nics = params.HostNics(host_nic = networklist)
+                        ))
+                        setMsg('nics are set')
+                    except Exception as e:
+                        setMsg('Failed to apply networkconfig')
+                        setFailed()
+                        setMsg(str(e))
+                        return False
+
+                    try:
+                        HOST.commitnetconfig()
+                        setMsg('Network config is saved')
+                    except Exception as e:
+                        setMsg('Failed to save networkconfig')
+                        setFailed()
+                        setMsg(str(e))
+                        return False
+            except Exception as e:
+                if 'The Host name is already in use' in str(e):
+                    setMsg("Host already exists")
+                else:
+                    setMsg("Failed to add host")
+                setFailed()
+                setMsg(str(e))
+                return False
+
+            HOST.activate()
+            while state != 'up':
+                HOST = self.get_Host(host_name)
+                state = HOST.status.state
+                time.sleep(1)
+                if state == 'non_responsive':
+                    setMsg('Failed to apply networkconfig.')
+                    setFailed()
+                    return False
+            setMsg('status host: up')
+        else:
+            setMsg("Host exists.")
+
+        return True
+
+ def del_NIC(self, vmname, nicname):
+ return self.get_NIC(vmname, nicname).delete()
+
+ def remove_VM(self, vmname):
+ VM = self.get_VM(vmname)
+ try:
+ VM.delete()
+ except Exception as e:
+ setMsg("Failed to remove VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def start_VM(self, vmname, timeout):
+ VM = self.get_VM(vmname)
+ try:
+ VM.start()
+ except Exception as e:
+ setMsg("Failed to start VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return self.wait_VM(vmname, "up", timeout)
+
+ def wait_VM(self, vmname, state, timeout):
+ VM = self.get_VM(vmname)
+ while VM.status.state != state:
+ VM = self.get_VM(vmname)
+ time.sleep(10)
+ if timeout is not False:
+ timeout -= 10
+ if timeout <= 0:
+ setMsg("Timeout expired")
+ setFailed()
+ return False
+ return True
+
+ def stop_VM(self, vmname, timeout):
+ VM = self.get_VM(vmname)
+ try:
+ VM.stop()
+ except Exception as e:
+ setMsg("Failed to stop VM.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return self.wait_VM(vmname, "down", timeout)
+
+ def set_CD(self, vmname, cd_drive):
+ VM = self.get_VM(vmname)
+ try:
+ if str(VM.status.state) == 'down':
+ cdrom = params.CdRom(file=cd_iso)
+ VM.cdroms.add(cdrom)
+ setMsg("Attached the image.")
+ setChanged()
+ else:
+ cdrom = VM.cdroms.get(id="00000000-0000-0000-0000-000000000000")
+ cdrom.set_file(cd_iso)
+ cdrom.update(current=True)
+ setMsg("Attached the image.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to attach image.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def set_VM_Host(self, vmname, vmhost):
+ VM = self.get_VM(vmname)
+ HOST = self.get_Host(vmhost)
+ try:
+ VM.placement_policy.host = HOST
+ VM.update()
+ setMsg("Set startup host to " + vmhost)
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to set startup host.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def migrate_VM(self, vmname, vmhost):
+ VM = self.get_VM(vmname)
+
+ HOST = self.get_Host_byid(VM.host.id)
+ if str(HOST.name) != vmhost:
+ try:
+ vm.migrate(
+ action=params.Action(
+ host=params.Host(
+ name=vmhost,
+ )
+ ),
+ )
+ setChanged()
+ setMsg("VM migrated to " + vmhost)
+ except Exception as e:
+ setMsg("Failed to set startup host.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+ def remove_CD(self, vmname):
+ VM = self.get_VM(vmname)
+ try:
+ VM.cdroms.get(id="00000000-0000-0000-0000-000000000000").delete()
+ setMsg("Removed the image.")
+ setChanged()
+ except Exception as e:
+ setMsg("Failed to remove the image.")
+ setMsg(str(e))
+ setFailed()
+ return False
+ return True
+
+
+class RHEV(object):
+ def __init__(self, module):
+ self.module = module
+
+ def __get_conn(self):
+ self.conn = RHEVConn(self.module)
+ return self.conn
+
+ def test(self):
+ self.__get_conn()
+ return "OK"
+
+ def getVM(self, name):
+ self.__get_conn()
+ VM = self.conn.get_VM(name)
+ if VM:
+ vminfo = dict()
+ vminfo['uuid'] = VM.id
+ vminfo['name'] = VM.name
+ vminfo['status'] = VM.status.state
+ vminfo['cpu_cores'] = VM.cpu.topology.cores
+ vminfo['cpu_sockets'] = VM.cpu.topology.sockets
+ vminfo['cpu_shares'] = VM.cpu_shares
+ vminfo['memory'] = (int(VM.memory) / 1024 / 1024 / 1024)
+ vminfo['mem_pol'] = (int(VM.memory_policy.guaranteed) / 1024 / 1024 / 1024)
+ vminfo['os'] = VM.get_os().type_
+ vminfo['del_prot'] = VM.delete_protected
+ try:
+ vminfo['host'] = str(self.conn.get_Host_byid(str(VM.host.id)).name)
+ except Exception as e:
+ vminfo['host'] = None
+ vminfo['boot_order'] = []
+ for boot_dev in VM.os.get_boot():
+ vminfo['boot_order'].append(str(boot_dev.dev))
+ vminfo['disks'] = []
+ for DISK in VM.disks.list():
+ disk = dict()
+ disk['name'] = DISK.name
+ disk['size'] = (int(DISK.size) / 1024 / 1024 / 1024)
+ disk['domain'] = str((self.conn.get_domain_byid(DISK.get_storage_domains().get_storage_domain()[0].id)).name)
+ disk['interface'] = DISK.interface
+ vminfo['disks'].append(disk)
+ vminfo['ifaces'] = []
+ for NIC in VM.nics.list():
+ iface = dict()
+ iface['name'] = str(NIC.name)
+ iface['vlan'] = str(self.conn.get_network_byid(NIC.get_network().id).name)
+ iface['interface'] = NIC.interface
+ iface['mac'] = NIC.mac.address
+ vminfo['ifaces'].append(iface)
+ vminfo[str(NIC.name)] = NIC.mac.address
+ CLUSTER = self.conn.get_cluster_byid(VM.cluster.id)
+ if CLUSTER:
+ vminfo['cluster'] = CLUSTER.name
+ else:
+ vminfo = False
+ return vminfo
+
+ def createVMimage(self, name, cluster, template, disks):
+ self.__get_conn()
+ return self.conn.createVMimage(name, cluster, template, disks)
+
+ def createVM(self, name, cluster, os, actiontype):
+ self.__get_conn()
+ return self.conn.createVM(name, cluster, os, actiontype)
+
+ def setMemory(self, name, memory):
+ self.__get_conn()
+ return self.conn.set_Memory(name, memory)
+
+ def setMemoryPolicy(self, name, memory_policy):
+ self.__get_conn()
+ return self.conn.set_Memory_Policy(name, memory_policy)
+
+ def setCPU(self, name, cpu):
+ self.__get_conn()
+ return self.conn.set_CPU(name, cpu)
+
+ def setCPUShare(self, name, cpu_share):
+ self.__get_conn()
+ return self.conn.set_CPU_share(name, cpu_share)
+
+ def setDisks(self, name, disks):
+ self.__get_conn()
+ counter = 0
+ bootselect = False
+ for disk in disks:
+ if 'bootable' in disk:
+ if disk['bootable'] is True:
+ bootselect = True
+
+ for disk in disks:
+ diskname = name + "_Disk" + str(counter) + "_" + disk.get('name', '').replace('/', '_')
+ disksize = disk.get('size', 1)
+ diskdomain = disk.get('domain', None)
+ if diskdomain is None:
+ setMsg("`domain` is a required disk key.")
+ setFailed()
+ return False
+ diskinterface = disk.get('interface', 'virtio')
+ diskformat = disk.get('format', 'raw')
+ diskallocationtype = disk.get('thin', False)
+ diskboot = disk.get('bootable', False)
+
+ if bootselect is False and counter == 0:
+ diskboot = True
+
+ DISK = self.conn.get_disk(diskname)
+
+ if DISK is None:
+ self.conn.createDisk(name, diskname, disksize, diskdomain, diskinterface, diskformat, diskallocationtype, diskboot)
+ else:
+ self.conn.set_Disk(diskname, disksize, diskinterface, diskboot)
+ checkFail()
+ counter += 1
+
+ return True
+
+ def setNetworks(self, vmname, ifaces):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+
+ counter = 0
+ length = len(ifaces)
+
+ for NIC in VM.nics.list():
+ if counter < length:
+ iface = ifaces[counter]
+ name = iface.get('name', None)
+ if name is None:
+ setMsg("`name` is a required iface key.")
+ setFailed()
+ elif str(name) != str(NIC.name):
+ setMsg("ifaces are in the wrong order, rebuilding everything.")
+ for NIC in VM.nics.list():
+ self.conn.del_NIC(vmname, NIC.name)
+ self.setNetworks(vmname, ifaces)
+ checkFail()
+ return True
+ vlan = iface.get('vlan', None)
+ if vlan is None:
+ setMsg("`vlan` is a required iface key.")
+ setFailed()
+ checkFail()
+ interface = iface.get('interface', 'virtio')
+ self.conn.set_NIC(vmname, str(NIC.name), name, vlan, interface)
+ else:
+ self.conn.del_NIC(vmname, NIC.name)
+ counter += 1
+ checkFail()
+
+ while counter < length:
+ iface = ifaces[counter]
+ name = iface.get('name', None)
+ if name is None:
+ setMsg("`name` is a required iface key.")
+ setFailed()
+ vlan = iface.get('vlan', None)
+ if vlan is None:
+ setMsg("`vlan` is a required iface key.")
+ setFailed()
+ if failed is True:
+ return False
+ interface = iface.get('interface', 'virtio')
+ self.conn.createNIC(vmname, name, vlan, interface)
+
+ counter += 1
+ checkFail()
+ return True
+
+ def setDeleteProtection(self, vmname, del_prot):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ if bool(VM.delete_protected) != bool(del_prot):
+ self.conn.set_DeleteProtection(vmname, del_prot)
+ checkFail()
+ setMsg("`delete protection` has been updated.")
+ else:
+ setMsg("`delete protection` already has the right value.")
+ return True
+
+ def setBootOrder(self, vmname, boot_order):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ bootorder = []
+ for boot_dev in VM.os.get_boot():
+ bootorder.append(str(boot_dev.dev))
+
+ if boot_order != bootorder:
+ self.conn.set_BootOrder(vmname, boot_order)
+ setMsg('The boot order has been set')
+ else:
+ setMsg('The boot order has already been set')
+ return True
+
+ def removeVM(self, vmname):
+ self.__get_conn()
+ self.setPower(vmname, "down", 300)
+ return self.conn.remove_VM(vmname)
+
+ def setPower(self, vmname, state, timeout):
+ self.__get_conn()
+ VM = self.conn.get_VM(vmname)
+ if VM is None:
+ setMsg("VM does not exist.")
+ setFailed()
+ return False
+
+ if state == VM.status.state:
+ setMsg("VM state was already " + state)
+ else:
+ if state == "up":
+ setMsg("VM is going to start")
+ self.conn.start_VM(vmname, timeout)
+ setChanged()
+ elif state == "down":
+ setMsg("VM is going to stop")
+ self.conn.stop_VM(vmname, timeout)
+ setChanged()
+ elif state == "restarted":
+ self.setPower(vmname, "down", timeout)
+ checkFail()
+ self.setPower(vmname, "up", timeout)
+ checkFail()
+ setMsg("the vm state is set to " + state)
+ return True
+
+ def setCD(self, vmname, cd_drive):
+ self.__get_conn()
+ if cd_drive:
+ return self.conn.set_CD(vmname, cd_drive)
+ else:
+ return self.conn.remove_CD(vmname)
+
+ def setVMHost(self, vmname, vmhost):
+ self.__get_conn()
+ return self.conn.set_VM_Host(vmname, vmhost)
+
+ VM = self.conn.get_VM(vmname)
+ HOST = self.conn.get_Host(vmhost)
+
+ if VM.placement_policy.host is None:
+ self.conn.set_VM_Host(vmname, vmhost)
+ elif str(VM.placement_policy.host.id) != str(HOST.id):
+ self.conn.set_VM_Host(vmname, vmhost)
+ else:
+ setMsg("VM's startup host was already set to " + vmhost)
+ checkFail()
+
+ if str(VM.status.state) == "up":
+ self.conn.migrate_VM(vmname, vmhost)
+ checkFail()
+
+ return True
+
+ def setHost(self, hostname, cluster, ifaces):
+ self.__get_conn()
+ return self.conn.set_Host(hostname, cluster, ifaces)
+
+
+def checkFail():
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ return True
+
+
+def setFailed():
+    # Flip the module-level failure flag checked by checkFail().
+    global failed
+    failed = True
+
+
+def setChanged():
+    # Record that at least one change was made, for the module result.
+    global changed
+    changed = True
+
+
+def setMsg(message):
+ global failed
+ msg.append(message)
+
+
+def core(module):
+
+ r = RHEV(module)
+
+ state = module.params.get('state', 'present')
+
+ if state == 'ping':
+ r.test()
+ return RHEV_SUCCESS, {"ping": "pong"}
+ elif state == 'info':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ vminfo = r.getVM(name)
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+ elif state == 'present':
+ created = False
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ actiontype = module.params.get('type')
+ if actiontype == 'server' or actiontype == 'desktop':
+ vminfo = r.getVM(name)
+ if vminfo:
+ setMsg('VM exists')
+ else:
+ # Create VM
+ cluster = module.params.get('cluster')
+ if cluster is None:
+ setMsg("cluster is a required argument.")
+ setFailed()
+ template = module.params.get('image')
+ if template:
+ disks = module.params.get('disks')
+ if disks is None:
+ setMsg("disks is a required argument.")
+ setFailed()
+ checkFail()
+ if r.createVMimage(name, cluster, template, disks) is False:
+ return RHEV_FAILED, vminfo
+ else:
+ os = module.params.get('osver')
+ if os is None:
+ setMsg("osver is a required argument.")
+ setFailed()
+ checkFail()
+ if r.createVM(name, cluster, os, actiontype) is False:
+ return RHEV_FAILED, vminfo
+ created = True
+
+ # Set MEMORY and MEMORY POLICY
+ vminfo = r.getVM(name)
+ memory = module.params.get('vmmem')
+ if memory is not None:
+ memory_policy = module.params.get('mempol')
+ if int(memory_policy) == 0:
+ memory_policy = memory
+ mem_pol_nok = True
+ if int(vminfo['mem_pol']) == int(memory_policy):
+ setMsg("Memory is correct")
+ mem_pol_nok = False
+
+ mem_nok = True
+ if int(vminfo['memory']) == int(memory):
+ setMsg("Memory is correct")
+ mem_nok = False
+
+ if memory_policy > memory:
+ setMsg('memory_policy cannot have a higher value than memory.')
+ return RHEV_FAILED, msg
+
+ if mem_nok and mem_pol_nok:
+ if int(memory_policy) > int(vminfo['memory']):
+ r.setMemory(vminfo['name'], memory)
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ else:
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ r.setMemory(vminfo['name'], memory)
+ elif mem_nok:
+ r.setMemory(vminfo['name'], memory)
+ elif mem_pol_nok:
+ r.setMemoryPolicy(vminfo['name'], memory_policy)
+ checkFail()
+
+ # Set CPU
+ cpu = module.params.get('vmcpu')
+ if int(vminfo['cpu_cores']) == int(cpu):
+ setMsg("Number of CPUs is correct")
+ else:
+ if r.setCPU(vminfo['name'], cpu) is False:
+ return RHEV_FAILED, msg
+
+ # Set CPU SHARE
+ cpu_share = module.params.get('cpu_share')
+ if cpu_share is not None:
+ if int(vminfo['cpu_shares']) == int(cpu_share):
+ setMsg("CPU share is correct.")
+ else:
+ if r.setCPUShare(vminfo['name'], cpu_share) is False:
+ return RHEV_FAILED, msg
+
+ # Set DISKS
+ disks = module.params.get('disks')
+ if disks is not None:
+ if r.setDisks(vminfo['name'], disks) is False:
+ return RHEV_FAILED, msg
+
+ # Set NETWORKS
+ ifaces = module.params.get('ifaces', None)
+ if ifaces is not None:
+ if r.setNetworks(vminfo['name'], ifaces) is False:
+ return RHEV_FAILED, msg
+
+ # Set Delete Protection
+ del_prot = module.params.get('del_prot')
+ if r.setDeleteProtection(vminfo['name'], del_prot) is False:
+ return RHEV_FAILED, msg
+
+ # Set Boot Order
+ boot_order = module.params.get('boot_order')
+ if r.setBootOrder(vminfo['name'], boot_order) is False:
+ return RHEV_FAILED, msg
+
+ # Set VM Host
+ vmhost = module.params.get('vmhost')
+ if vmhost is not False and vmhost is not "False":
+ if r.setVMHost(vminfo['name'], vmhost) is False:
+ return RHEV_FAILED, msg
+
+ vminfo = r.getVM(name)
+ vminfo['created'] = created
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ if actiontype == 'host':
+ cluster = module.params.get('cluster')
+ if cluster is None:
+ setMsg("cluster is a required argument.")
+ setFailed()
+ ifaces = module.params.get('ifaces')
+ if ifaces is None:
+ setMsg("ifaces is a required argument.")
+ setFailed()
+ if r.setHost(name, cluster, ifaces) is False:
+ return RHEV_FAILED, msg
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
+
+ elif state == 'absent':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ actiontype = module.params.get('type')
+ if actiontype == 'server' or actiontype == 'desktop':
+ vminfo = r.getVM(name)
+ if vminfo:
+ setMsg('VM exists')
+
+ # Set Delete Protection
+ del_prot = module.params.get('del_prot')
+ if r.setDeleteProtection(vminfo['name'], del_prot) is False:
+ return RHEV_FAILED, msg
+
+ # Remove VM
+ if r.removeVM(vminfo['name']) is False:
+ return RHEV_FAILED, msg
+ setMsg('VM has been removed.')
+ vminfo['state'] = 'DELETED'
+ else:
+ setMsg('VM was already removed.')
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ elif state == 'up' or state == 'down' or state == 'restarted':
+ name = module.params.get('name')
+ if not name:
+ setMsg("`name` is a required argument.")
+ return RHEV_FAILED, msg
+ timeout = module.params.get('timeout')
+ if r.setPower(name, state, timeout) is False:
+ return RHEV_FAILED, msg
+ vminfo = r.getVM(name)
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg, 'vm': vminfo}
+
+ elif state == 'cd':
+ name = module.params.get('name')
+ cd_drive = module.params.get('cd_drive')
+ if r.setCD(name, cd_drive) is False:
+ return RHEV_FAILED, msg
+ return RHEV_SUCCESS, {'changed': changed, 'msg': msg}
+
+
+def main():
+    # Entry point: declare the argument spec, verify the SDK is importable,
+    # run core() and translate its (rc, result) into exit/fail JSON.
+    global module
+    module = AnsibleModule(
+        argument_spec = dict(
+            state = dict(default='present', choices=['ping', 'present', 'absent', 'up', 'down', 'restarted', 'cd', 'info']),
+            user = dict(default="admin@internal"),
+            password = dict(required=True),
+            server = dict(default="127.0.0.1"),
+            port = dict(default="443"),
+            insecure_api = dict(default=False, type='bool'),
+            name = dict(),
+            image = dict(default=False),
+            datacenter = dict(default="Default"),
+            type = dict(default="server", choices=['server', 'desktop', 'host']),
+            cluster = dict(default=''),
+            vmhost = dict(default=False),
+            vmcpu = dict(default="2"),
+            vmmem = dict(default="1"),
+            disks = dict(),
+            osver = dict(default="rhel_6x64"),
+            ifaces = dict(aliases=['nics', 'interfaces']),
+            timeout = dict(default=False),
+            mempol = dict(default="1"),
+            vm_ha = dict(default=True),
+            cpu_share = dict(default="0"),
+            boot_order = dict(default=["network", "hd"]),
+            del_prot = dict(default=True, type="bool"),
+            cd_drive = dict(default=False)
+        ),
+    )
+
+    if not HAS_SDK:
+        module.fail_json(
+            msg='The `ovirtsdk` module is not importable. Check the requirements.'
+        )
+
+    rc = RHEV_SUCCESS
+    try:
+        rc, result = core(module)
+    except Exception as e:
+        module.fail_json(msg=str(e))
+
+    if rc != 0: # something went wrong emit the msg
+        module.fail_json(rc=rc, msg=result)
+    else:
+        module.exit_json(**result)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/misc/virt.py b/lib/ansible/modules/cloud/misc/virt.py
new file mode 100644
index 0000000000..3e9c098f3d
--- /dev/null
+++ b/lib/ansible/modules/cloud/misc/virt.py
@@ -0,0 +1,538 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+Virt management features
+
+Copyright 2007, 2012 Red Hat, Inc
+Michael DeHaan <michael.dehaan@gmail.com>
+Seth Vidal <skvidal@fedoraproject.org>
+
+This software may be freely redistributed under the terms of the GNU
+general public license.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: virt
+short_description: Manages virtual machines supported by libvirt
+description:
+ - Manages virtual machines supported by I(libvirt).
+version_added: "0.2"
+options:
+ name:
+ description:
+ - name of the guest VM being managed. Note that VM must be previously
+ defined with xml.
+ required: true
+ default: null
+ aliases: []
+ state:
+ description:
+ - Note that there may be some lag for state requests like C(shutdown)
+ since these refer only to VM states. After starting a guest, it may not
+ be immediately accessible.
+ required: false
+ choices: [ "running", "shutdown", "destroyed", "paused" ]
+    default: null
+ command:
+ description:
+ - in addition to state management, various non-idempotent commands are available. See examples
+ required: false
+ choices: ["create","status", "start", "stop", "pause", "unpause",
+ "shutdown", "undefine", "destroy", "get_xml", "autostart",
+ "freemem", "list_vms", "info", "nodeinfo", "virttype", "define"]
+ uri:
+ description:
+ - libvirt connection uri
+ required: false
+    default: qemu:///system
+ xml:
+ description:
+ - XML document used with the define command
+ required: false
+ default: null
+requirements:
+ - "python >= 2.6"
+ - "libvirt-python"
+author:
+ - "Ansible Core Team"
+ - "Michael DeHaan"
+ - "Seth Vidal"
+'''
+
+EXAMPLES = '''
+# a playbook task line:
+- virt:
+ name: alpha
+ state: running
+
+# /usr/bin/ansible invocations
+ansible host -m virt -a "name=alpha command=status"
+ansible host -m virt -a "name=alpha command=get_xml"
+ansible host -m virt -a "name=alpha command=create uri=lxc:///"
+
+# a playbook example of defining and launching an LXC guest
+tasks:
+ - name: define vm
+ virt:
+ name: foo
+ command: define
+      xml: "{{ lookup('template', 'container-template.xml.j2') }}"
+ uri: 'lxc:///'
+ - name: start vm
+ virt:
+ name: foo
+ state: running
+ uri: 'lxc:///'
+'''
+
+RETURN = '''
+# for list_vms command
+list_vms:
+ description: The list of vms defined on the remote system
+ type: dictionary
+ returned: success
+ sample: [
+ "build.example.org",
+ "dev.example.org"
+ ]
+# for status command
+status:
+ description: The status of the VM, among running, crashed, paused and shutdown
+ type: string
+ sample: "success"
+ returned: success
+'''
+# Module return codes.
+VIRT_FAILED = 1
+VIRT_SUCCESS = 0
+VIRT_UNAVAILABLE=2
+
+import sys
+
+# libvirt is an optional dependency; its absence is reported from main().
+try:
+    import libvirt
+except ImportError:
+    HAS_VIRT = False
+else:
+    HAS_VIRT = True
+
+# Commands operating on a single guest vs. on the hypervisor host.
+ALL_COMMANDS = []
+VM_COMMANDS = ['create','status', 'start', 'stop', 'pause', 'unpause',
+               'shutdown', 'undefine', 'destroy', 'get_xml', 'autostart', 'define']
+HOST_COMMANDS = ['freemem', 'list_vms', 'info', 'nodeinfo', 'virttype']
+ALL_COMMANDS.extend(VM_COMMANDS)
+ALL_COMMANDS.extend(HOST_COMMANDS)
+
+# Map libvirt numeric domain states to coarse human-readable names.
+VIRT_STATE_NAME_MAP = {
+    0 : "running",
+    1 : "running",
+    2 : "running",
+    3 : "paused",
+    4 : "shutdown",
+    5 : "shutdown",
+    6 : "crashed"
+}
+
+class VMNotFound(Exception):
+    """Raised when a requested libvirt domain cannot be found."""
+    pass
+
+class LibvirtConnection(object):
+
+ def __init__(self, uri, module):
+
+ self.module = module
+
+ cmd = "uname -r"
+ rc, stdout, stderr = self.module.run_command(cmd)
+
+ if "xen" in stdout:
+ conn = libvirt.open(None)
+ elif "esx" in uri:
+ auth = [[libvirt.VIR_CRED_AUTHNAME, libvirt.VIR_CRED_NOECHOPROMPT], [], None]
+ conn = libvirt.openAuth(uri, auth)
+ else:
+ conn = libvirt.open(uri)
+
+ if not conn:
+ raise Exception("hypervisor connection failure")
+
+ self.conn = conn
+
+ def find_vm(self, vmid):
+ """
+ Extra bonus feature: vmid = -1 returns a list of everything
+ """
+ conn = self.conn
+
+ vms = []
+
+ # this block of code borrowed from virt-manager:
+ # get working domain's name
+ ids = conn.listDomainsID()
+ for id in ids:
+ vm = conn.lookupByID(id)
+ vms.append(vm)
+ # get defined domain
+ names = conn.listDefinedDomains()
+ for name in names:
+ vm = conn.lookupByName(name)
+ vms.append(vm)
+
+ if vmid == -1:
+ return vms
+
+ for vm in vms:
+ if vm.name() == vmid:
+ return vm
+
+ raise VMNotFound("virtual machine %s not found" % vmid)
+
+ def shutdown(self, vmid):
+ return self.find_vm(vmid).shutdown()
+
+ def pause(self, vmid):
+ return self.suspend(self.conn,vmid)
+
+ def unpause(self, vmid):
+ return self.resume(self.conn,vmid)
+
+ def suspend(self, vmid):
+ return self.find_vm(vmid).suspend()
+
+ def resume(self, vmid):
+ return self.find_vm(vmid).resume()
+
+ def create(self, vmid):
+ return self.find_vm(vmid).create()
+
+ def destroy(self, vmid):
+ return self.find_vm(vmid).destroy()
+
+ def undefine(self, vmid):
+ return self.find_vm(vmid).undefine()
+
+ def get_status2(self, vm):
+ state = vm.info()[0]
+ return VIRT_STATE_NAME_MAP.get(state,"unknown")
+
+ def get_status(self, vmid):
+ state = self.find_vm(vmid).info()[0]
+ return VIRT_STATE_NAME_MAP.get(state,"unknown")
+
+ def nodeinfo(self):
+ return self.conn.getInfo()
+
+ def get_type(self):
+ return self.conn.getType()
+
+ def get_xml(self, vmid):
+ vm = self.conn.lookupByName(vmid)
+ return vm.XMLDesc(0)
+
+ def get_maxVcpus(self, vmid):
+ vm = self.conn.lookupByName(vmid)
+ return vm.maxVcpus()
+
+ def get_maxMemory(self, vmid):
+ vm = self.conn.lookupByName(vmid)
+ return vm.maxMemory()
+
+ def getFreeMemory(self):
+ return self.conn.getFreeMemory()
+
+ def get_autostart(self, vmid):
+ vm = self.conn.lookupByName(vmid)
+ return vm.autostart()
+
+ def set_autostart(self, vmid, val):
+ vm = self.conn.lookupByName(vmid)
+ return vm.setAutostart(val)
+
+ def define_from_xml(self, xml):
+ return self.conn.defineXML(xml)
+
+
+class Virt(object):
+
+ def __init__(self, uri, module):
+ self.module = module
+ self.uri = uri
+
+ def __get_conn(self):
+ self.conn = LibvirtConnection(self.uri, self.module)
+ return self.conn
+
+ def get_vm(self, vmid):
+ self.__get_conn()
+ return self.conn.find_vm(vmid)
+
+ def state(self):
+ vms = self.list_vms()
+ state = []
+ for vm in vms:
+ state_blurb = self.conn.get_status(vm)
+ state.append("%s %s" % (vm,state_blurb))
+ return state
+
+ def info(self):
+ vms = self.list_vms()
+ info = dict()
+ for vm in vms:
+ data = self.conn.find_vm(vm).info()
+ # libvirt returns maxMem, memory, and cpuTime as long()'s, which
+ # xmlrpclib tries to convert to regular int's during serialization.
+ # This throws exceptions, so convert them to strings here and
+ # assume the other end of the xmlrpc connection can figure things
+ # out or doesn't care.
+ info[vm] = {
+ "state" : VIRT_STATE_NAME_MAP.get(data[0],"unknown"),
+ "maxMem" : str(data[1]),
+ "memory" : str(data[2]),
+ "nrVirtCpu" : data[3],
+ "cpuTime" : str(data[4]),
+ }
+ info[vm]["autostart"] = self.conn.get_autostart(vm)
+
+ return info
+
+ def nodeinfo(self):
+ self.__get_conn()
+ info = dict()
+ data = self.conn.nodeinfo()
+ info = {
+ "cpumodel" : str(data[0]),
+ "phymemory" : str(data[1]),
+ "cpus" : str(data[2]),
+ "cpumhz" : str(data[3]),
+ "numanodes" : str(data[4]),
+ "sockets" : str(data[5]),
+ "cpucores" : str(data[6]),
+ "cputhreads" : str(data[7])
+ }
+ return info
+
+ def list_vms(self, state=None):
+ self.conn = self.__get_conn()
+ vms = self.conn.find_vm(-1)
+ results = []
+ for x in vms:
+ try:
+ if state:
+ vmstate = self.conn.get_status2(x)
+ if vmstate == state:
+ results.append(x.name())
+ else:
+ results.append(x.name())
+ except:
+ pass
+ return results
+
+ def virttype(self):
+ return self.__get_conn().get_type()
+
+ def autostart(self, vmid):
+ self.conn = self.__get_conn()
+ return self.conn.set_autostart(vmid, True)
+
+ def freemem(self):
+ self.conn = self.__get_conn()
+ return self.conn.getFreeMemory()
+
+ def shutdown(self, vmid):
+ """ Make the machine with the given vmid stop running. Whatever that takes. """
+ self.__get_conn()
+ self.conn.shutdown(vmid)
+ return 0
+
+
+ def pause(self, vmid):
+ """ Pause the machine with the given vmid. """
+
+ self.__get_conn()
+ return self.conn.suspend(vmid)
+
+ def unpause(self, vmid):
+ """ Unpause the machine with the given vmid. """
+
+ self.__get_conn()
+ return self.conn.resume(vmid)
+
+ def create(self, vmid):
+ """ Start the machine via the given vmid """
+
+ self.__get_conn()
+ return self.conn.create(vmid)
+
+ def start(self, vmid):
+ """ Start the machine via the given id/name """
+
+ self.__get_conn()
+ return self.conn.create(vmid)
+
+ def destroy(self, vmid):
+ """ Pull the virtual power from the virtual domain, giving it virtually no time to virtually shut down. """
+ self.__get_conn()
+ return self.conn.destroy(vmid)
+
+ def undefine(self, vmid):
+ """ Stop a domain, and then wipe it from the face of the earth. (delete disk/config file) """
+
+ self.__get_conn()
+ return self.conn.undefine(vmid)
+
+ def status(self, vmid):
+ """
+ Return a state suitable for server consumption. Aka, codes.py values, not XM output.
+ """
+ self.__get_conn()
+ return self.conn.get_status(vmid)
+
+ def get_xml(self, vmid):
+ """
+ Receive a Vm id as input
+ Return an xml describing vm config returned by a libvirt call
+ """
+
+ self.__get_conn()
+ return self.conn.get_xml(vmid)
+
+ def get_maxVcpus(self, vmid):
+ """
+ Gets the max number of VCPUs on a guest
+ """
+
+ self.__get_conn()
+ return self.conn.get_maxVcpus(vmid)
+
+ def get_max_memory(self, vmid):
+ """
+ Gets the max memory on a guest
+ """
+
+ self.__get_conn()
+ return self.conn.get_MaxMemory(vmid)
+
+ def define(self, xml):
+ """
+ Define a guest with the given xml
+ """
+ self.__get_conn()
+ return self.conn.define_from_xml(xml)
+
+def core(module):
+
+ state = module.params.get('state', None)
+ guest = module.params.get('name', None)
+ command = module.params.get('command', None)
+ uri = module.params.get('uri', None)
+ xml = module.params.get('xml', None)
+
+ v = Virt(uri, module)
+ res = {}
+
+ if state and command=='list_vms':
+ res = v.list_vms(state=state)
+ if not isinstance(res, dict):
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ if state:
+ if not guest:
+ module.fail_json(msg = "state change requires a guest specified")
+
+ res['changed'] = False
+ if state == 'running':
+ if v.status(guest) is 'paused':
+ res['changed'] = True
+ res['msg'] = v.unpause(guest)
+ elif v.status(guest) is not 'running':
+ res['changed'] = True
+ res['msg'] = v.start(guest)
+ elif state == 'shutdown':
+ if v.status(guest) is not 'shutdown':
+ res['changed'] = True
+ res['msg'] = v.shutdown(guest)
+ elif state == 'destroyed':
+ if v.status(guest) is not 'shutdown':
+ res['changed'] = True
+ res['msg'] = v.destroy(guest)
+ elif state == 'paused':
+ if v.status(guest) is 'running':
+ res['changed'] = True
+ res['msg'] = v.pause(guest)
+ else:
+ module.fail_json(msg="unexpected state")
+
+ return VIRT_SUCCESS, res
+
+ if command:
+ if command in VM_COMMANDS:
+ if not guest:
+ module.fail_json(msg = "%s requires 1 argument: guest" % command)
+ if command == 'define':
+ if not xml:
+ module.fail_json(msg = "define requires xml argument")
+ try:
+ v.get_vm(guest)
+ except VMNotFound:
+ v.define(xml)
+ res = {'changed': True, 'created': guest}
+ return VIRT_SUCCESS, res
+ res = getattr(v, command)(guest)
+ if not isinstance(res, dict):
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ elif hasattr(v, command):
+ res = getattr(v, command)()
+ if not isinstance(res, dict):
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ else:
+ module.fail_json(msg="Command %s not recognized" % basecmd)
+
+ module.fail_json(msg="expected state or command parameter to be specified")
+
+def main():
+
+    # name/state/command are all optional at the spec level; core()
+    # validates the required combinations.
+    module = AnsibleModule(argument_spec=dict(
+        name = dict(aliases=['guest']),
+        state = dict(choices=['running', 'shutdown', 'destroyed', 'paused']),
+        command = dict(choices=ALL_COMMANDS),
+        uri = dict(default='qemu:///system'),
+        xml = dict(),
+    ))
+
+    if not HAS_VIRT:
+        module.fail_json(
+            msg='The `libvirt` module is not importable. Check the requirements.'
+        )
+
+    rc = VIRT_SUCCESS
+    try:
+        rc, result = core(module)
+    except Exception:
+        e = get_exception()
+        module.fail_json(msg=str(e))
+
+    if rc != 0: # something went wrong emit the msg
+        module.fail_json(rc=rc, msg=result)
+    else:
+        module.exit_json(**result)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.pycompat24 import get_exception
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/misc/virt_net.py b/lib/ansible/modules/cloud/misc/virt_net.py
new file mode 100644
index 0000000000..a37c7ca9e3
--- /dev/null
+++ b/lib/ansible/modules/cloud/misc/virt_net.py
@@ -0,0 +1,622 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Maciej Delmanowski <drybjed@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: virt_net
+author: "Maciej Delmanowski (@drybjed)"
+version_added: "2.0"
+short_description: Manage libvirt network configuration
+description:
+ - Manage I(libvirt) networks.
+options:
+ name:
+ required: true
+ aliases: ['network']
+ description:
+ - name of the network being managed. Note that network must be previously
+ defined with xml.
+ state:
+ required: false
+ choices: [ "active", "inactive", "present", "absent" ]
+ description:
+ - specify which state you want a network to be in.
+ If 'active', network will be started.
+ If 'present', ensure that network is present but do not change its
+ state; if it's missing, you need to specify xml argument.
+ If 'inactive', network will be stopped.
+        If 'absent', network will be removed from I(libvirt) configuration.
+ command:
+ required: false
+ choices: [ "define", "create", "start", "stop", "destroy",
+ "undefine", "get_xml", "list_nets", "facts",
+ "info", "status", "modify"]
+ description:
+ - in addition to state management, various non-idempotent commands are available.
+ See examples.
+ Modify was added in version 2.1
+ autostart:
+ required: false
+ choices: ["yes", "no"]
+ description:
+      - Specify if a given network should be started automatically on system boot.
+ uri:
+ required: false
+ default: "qemu:///system"
+ description:
+ - libvirt connection uri.
+ xml:
+ required: false
+ description:
+ - XML document used with the define command.
+requirements:
+ - "python >= 2.6"
+ - "python-libvirt"
+ - "python-lxml"
+'''
+
+EXAMPLES = '''
+# Define a new network
+- virt_net:
+ command: define
+ name: br_nat
+ xml: '{{ lookup("template", "network/bridge.xml.j2") }}'
+
+# Start a network
+- virt_net:
+ command: create
+ name: br_nat
+
+# List available networks
+- virt_net:
+ command: list_nets
+
+# Get XML data of a specified network
+- virt_net:
+ command: get_xml
+ name: br_nat
+
+# Stop a network
+- virt_net:
+ command: destroy
+ name: br_nat
+
+# Undefine a network
+- virt_net:
+ command: undefine
+ name: br_nat
+
+# Gather facts about networks
+# Facts will be available as 'ansible_libvirt_networks'
+- virt_net:
+ command: facts
+
+# Gather information about network managed by 'libvirt' remotely using uri
+- virt_net:
+ command: info
+ uri: '{{ item }}'
+ with_items: '{{ libvirt_uris }}'
+ register: networks
+
+# Ensure that a network is active (needs to be defined and built first)
+- virt_net:
+ state: active
+ name: br_nat
+
+# Ensure that a network is inactive
+- virt_net:
+ state: inactive
+ name: br_nat
+
+# Ensure that a given network will be started at boot
+- virt_net:
+ autostart: yes
+ name: br_nat
+
+# Disable autostart for a given network
+- virt_net:
+ autostart: no
+ name: br_nat
+'''
+
+VIRT_FAILED = 1
+VIRT_SUCCESS = 0
+VIRT_UNAVAILABLE=2
+
+
+try:
+ import libvirt
+except ImportError:
+ HAS_VIRT = False
+else:
+ HAS_VIRT = True
+
+try:
+ from lxml import etree
+except ImportError:
+ HAS_XML = False
+else:
+ HAS_XML = True
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+ALL_COMMANDS = []
+ENTRY_COMMANDS = ['create', 'status', 'start', 'stop',
+ 'undefine', 'destroy', 'get_xml', 'define',
+ 'modify' ]
+HOST_COMMANDS = [ 'list_nets', 'facts', 'info' ]
+ALL_COMMANDS.extend(ENTRY_COMMANDS)
+ALL_COMMANDS.extend(HOST_COMMANDS)
+
+ENTRY_STATE_ACTIVE_MAP = {
+ 0 : "inactive",
+ 1 : "active"
+}
+
+ENTRY_STATE_AUTOSTART_MAP = {
+ 0 : "no",
+ 1 : "yes"
+}
+
+ENTRY_STATE_PERSISTENT_MAP = {
+ 0 : "no",
+ 1 : "yes"
+}
+
+class EntryNotFound(Exception):
+    """Raised when a named libvirt network cannot be found."""
+    pass
+
+
+class LibvirtConnection(object):
+
+ def __init__(self, uri, module):
+
+ self.module = module
+
+ conn = libvirt.open(uri)
+
+ if not conn:
+ raise Exception("hypervisor connection failure")
+
+ self.conn = conn
+
+ def find_entry(self, entryid):
+ # entryid = -1 returns a list of everything
+
+ results = []
+
+ # Get active entries
+ for name in self.conn.listNetworks():
+ entry = self.conn.networkLookupByName(name)
+ results.append(entry)
+
+ # Get inactive entries
+ for name in self.conn.listDefinedNetworks():
+ entry = self.conn.networkLookupByName(name)
+ results.append(entry)
+
+ if entryid == -1:
+ return results
+
+ for entry in results:
+ if entry.name() == entryid:
+ return entry
+
+ raise EntryNotFound("network %s not found" % entryid)
+
+ def create(self, entryid):
+ if not self.module.check_mode:
+ return self.find_entry(entryid).create()
+ else:
+ try:
+ state = self.find_entry(entryid).isActive()
+ except:
+ return self.module.exit_json(changed=True)
+ if not state:
+ return self.module.exit_json(changed=True)
+
+ def modify(self, entryid, xml):
+ network = self.find_entry(entryid)
+ # identify what type of entry is given in the xml
+ new_data = etree.fromstring(xml)
+ old_data = etree.fromstring(network.XMLDesc(0))
+ if new_data.tag == 'host':
+ mac_addr = new_data.get('mac')
+ hosts = old_data.xpath('/network/ip/dhcp/host')
+ # find the one mac we're looking for
+ host = None
+ for h in hosts:
+ if h.get('mac') == mac_addr:
+ host = h
+ break
+ if host is None:
+ # add the host
+ if not self.module.check_mode:
+ res = network.update (libvirt.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST,
+ libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
+ -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
+ else:
+ # pretend there was a change
+ res = 0
+ if res == 0:
+ return True
+ else:
+ # change the host
+ if host.get('name') == new_data.get('name') and host.get('ip') == new_data.get('ip'):
+ return False
+ else:
+ if not self.module.check_mode:
+ res = network.update (libvirt.VIR_NETWORK_UPDATE_COMMAND_MODIFY,
+ libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST,
+ -1, xml, libvirt.VIR_NETWORK_UPDATE_AFFECT_CURRENT)
+ else:
+ # pretend there was a change
+ res = 0
+ if res == 0:
+ return True
+ # command, section, parentIndex, xml, flags=0
+ self.module.fail_json(msg='updating this is not supported yet '+unicode(xml))
+
+ def destroy(self, entryid):
+ if not self.module.check_mode:
+ return self.find_entry(entryid).destroy()
+ else:
+ if self.find_entry(entryid).isActive():
+ return self.module.exit_json(changed=True)
+
+ def undefine(self, entryid):
+ if not self.module.check_mode:
+ return self.find_entry(entryid).undefine()
+ else:
+ if not self.find_entry(entryid):
+ return self.module.exit_json(changed=True)
+
+ def get_status2(self, entry):
+ state = entry.isActive()
+ return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+
+ def get_status(self, entryid):
+ if not self.module.check_mode:
+ state = self.find_entry(entryid).isActive()
+ return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+ else:
+ try:
+ state = self.find_entry(entryid).isActive()
+ return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+ except:
+ return ENTRY_STATE_ACTIVE_MAP.get("inactive","unknown")
+
+ def get_uuid(self, entryid):
+ return self.find_entry(entryid).UUIDString()
+
+ def get_xml(self, entryid):
+ return self.find_entry(entryid).XMLDesc(0)
+
+ def get_forward(self, entryid):
+ xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+ try:
+ result = xml.xpath('/network/forward')[0].get('mode')
+ except:
+ raise ValueError('Forward mode not specified')
+ return result
+
+ def get_domain(self, entryid):
+ xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+ try:
+ result = xml.xpath('/network/domain')[0].get('name')
+ except:
+ raise ValueError('Domain not specified')
+ return result
+
+ def get_macaddress(self, entryid):
+ xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+ try:
+ result = xml.xpath('/network/mac')[0].get('address')
+ except:
+ raise ValueError('MAC address not specified')
+ return result
+
+ def get_autostart(self, entryid):
+ state = self.find_entry(entryid).autostart()
+ return ENTRY_STATE_AUTOSTART_MAP.get(state,"unknown")
+
+ def get_autostart2(self, entryid):
+ if not self.module.check_mode:
+ return self.find_entry(entryid).autostart()
+ else:
+ try:
+ return self.find_entry(entryid).autostart()
+ except:
+ return self.module.exit_json(changed=True)
+
+ def set_autostart(self, entryid, val):
+ if not self.module.check_mode:
+ return self.find_entry(entryid).setAutostart(val)
+ else:
+ try:
+ state = self.find_entry(entryid).autostart()
+ except:
+ return self.module.exit_json(changed=True)
+ if bool(state) != val:
+ return self.module.exit_json(changed=True)
+
+ def get_bridge(self, entryid):
+ return self.find_entry(entryid).bridgeName()
+
+ def get_persistent(self, entryid):
+ state = self.find_entry(entryid).isPersistent()
+ return ENTRY_STATE_PERSISTENT_MAP.get(state,"unknown")
+
+ def define_from_xml(self, entryid, xml):
+ if not self.module.check_mode:
+ return self.conn.networkDefineXML(xml)
+ else:
+ try:
+ self.find_entry(entryid)
+ except:
+ return self.module.exit_json(changed=True)
+
+
+class VirtNetwork(object):
+
+ def __init__(self, uri, module):
+ self.module = module
+ self.uri = uri
+ self.conn = LibvirtConnection(self.uri, self.module)
+
+ def get_net(self, entryid):
+ return self.conn.find_entry(entryid)
+
+ def list_nets(self, state=None):
+ results = []
+ for entry in self.conn.find_entry(-1):
+ if state:
+ if state == self.conn.get_status2(entry):
+ results.append(entry.name())
+ else:
+ results.append(entry.name())
+ return results
+
+ def state(self):
+ results = []
+ for entry in self.list_nets():
+ state_blurb = self.conn.get_status(entry)
+ results.append("%s %s" % (entry,state_blurb))
+ return results
+
+ def autostart(self, entryid):
+ return self.conn.set_autostart(entryid, True)
+
+ def get_autostart(self, entryid):
+ return self.conn.get_autostart2(entryid)
+
+ def set_autostart(self, entryid, state):
+ return self.conn.set_autostart(entryid, state)
+
+ def create(self, entryid):
+ return self.conn.create(entryid)
+
+ def modify(self, entryid, xml):
+ return self.conn.modify(entryid, xml)
+
+ def start(self, entryid):
+ return self.conn.create(entryid)
+
+ def stop(self, entryid):
+ return self.conn.destroy(entryid)
+
+ def destroy(self, entryid):
+ return self.conn.destroy(entryid)
+
+ def undefine(self, entryid):
+ return self.conn.undefine(entryid)
+
+ def status(self, entryid):
+ return self.conn.get_status(entryid)
+
+ def get_xml(self, entryid):
+ return self.conn.get_xml(entryid)
+
+ def define(self, entryid, xml):
+ return self.conn.define_from_xml(entryid, xml)
+
+ def info(self):
+ return self.facts(facts_mode='info')
+
+ def facts(self, facts_mode='facts'):
+ results = dict()
+ for entry in self.list_nets():
+ results[entry] = dict()
+ results[entry]["autostart"] = self.conn.get_autostart(entry)
+ results[entry]["persistent"] = self.conn.get_persistent(entry)
+ results[entry]["state"] = self.conn.get_status(entry)
+ results[entry]["bridge"] = self.conn.get_bridge(entry)
+ results[entry]["uuid"] = self.conn.get_uuid(entry)
+
+ try:
+ results[entry]["forward_mode"] = self.conn.get_forward(entry)
+ except ValueError:
+ pass
+
+ try:
+ results[entry]["domain"] = self.conn.get_domain(entry)
+ except ValueError:
+ pass
+
+ try:
+ results[entry]["macaddress"] = self.conn.get_macaddress(entry)
+ except ValueError:
+ pass
+
+ facts = dict()
+ if facts_mode == 'facts':
+ facts["ansible_facts"] = dict()
+ facts["ansible_facts"]["ansible_libvirt_networks"] = results
+ elif facts_mode == 'info':
+ facts['networks'] = results
+ return facts
+
+
+def core(module):
+
+ state = module.params.get('state', None)
+ name = module.params.get('name', None)
+ command = module.params.get('command', None)
+ uri = module.params.get('uri', None)
+ xml = module.params.get('xml', None)
+ autostart = module.params.get('autostart', None)
+
+ v = VirtNetwork(uri, module)
+ res = {}
+
+ if state and command == 'list_nets':
+ res = v.list_nets(state=state)
+ if not isinstance(res, dict):
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ if state:
+ if not name:
+ module.fail_json(msg = "state change requires a specified name")
+
+ res['changed'] = False
+ if state in [ 'active' ]:
+ if v.status(name) is not 'active':
+ res['changed'] = True
+ res['msg'] = v.start(name)
+ elif state in [ 'present' ]:
+ try:
+ v.get_net(name)
+ except EntryNotFound:
+ if not xml:
+ module.fail_json(msg = "network '" + name + "' not present, but xml not specified")
+ v.define(name, xml)
+ res = {'changed': True, 'created': name}
+ elif state in [ 'inactive' ]:
+ entries = v.list_nets()
+ if name in entries:
+ if v.status(name) is not 'inactive':
+ res['changed'] = True
+ res['msg'] = v.destroy(name)
+ elif state in [ 'undefined', 'absent' ]:
+ entries = v.list_nets()
+ if name in entries:
+ if v.status(name) is not 'inactive':
+ v.destroy(name)
+ res['changed'] = True
+ res['msg'] = v.undefine(name)
+ else:
+ module.fail_json(msg="unexpected state")
+
+ return VIRT_SUCCESS, res
+
+ if command:
+ if command in ENTRY_COMMANDS:
+ if not name:
+ module.fail_json(msg = "%s requires 1 argument: name" % command)
+ if command in ('define', 'modify'):
+ if not xml:
+ module.fail_json(msg = command+" requires xml argument")
+ try:
+ v.get_net(name)
+ except EntryNotFound:
+ v.define(name, xml)
+ res = {'changed': True, 'created': name}
+ else:
+ if command == 'modify':
+ mod = v.modify(name, xml)
+ res = {'changed': mod, 'modified': name}
+ return VIRT_SUCCESS, res
+ res = getattr(v, command)(name)
+ if not isinstance(res, dict):
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ elif hasattr(v, command):
+ res = getattr(v, command)()
+ if not isinstance(res, dict):
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ else:
+ module.fail_json(msg="Command %s not recognized" % command)
+
+ if autostart is not None:
+ if not name:
+ module.fail_json(msg = "state change requires a specified name")
+
+ res['changed'] = False
+ if autostart:
+ if not v.get_autostart(name):
+ res['changed'] = True
+ res['msg'] = v.set_autostart(name, True)
+ else:
+ if v.get_autostart(name):
+ res['changed'] = True
+ res['msg'] = v.set_autostart(name, False)
+
+ return VIRT_SUCCESS, res
+
+ module.fail_json(msg="expected state or command parameter to be specified")
+
+
+def main():
+
+ module = AnsibleModule (
+ argument_spec = dict(
+ name = dict(aliases=['network']),
+ state = dict(choices=['active', 'inactive', 'present', 'absent']),
+ command = dict(choices=ALL_COMMANDS),
+ uri = dict(default='qemu:///system'),
+ xml = dict(),
+ autostart = dict(type='bool')
+ ),
+ supports_check_mode = True
+ )
+
+ if not HAS_VIRT:
+ module.fail_json(
+ msg='The `libvirt` module is not importable. Check the requirements.'
+ )
+
+ if not HAS_XML:
+ module.fail_json(
+ msg='The `lxml` module is not importable. Check the requirements.'
+ )
+
+ rc = VIRT_SUCCESS
+ try:
+ rc, result = core(module)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ if rc != 0: # something went wrong emit the msg
+ module.fail_json(rc=rc, msg=result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/misc/virt_pool.py b/lib/ansible/modules/cloud/misc/virt_pool.py
new file mode 100644
index 0000000000..f9a7ba4913
--- /dev/null
+++ b/lib/ansible/modules/cloud/misc/virt_pool.py
@@ -0,0 +1,721 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Maciej Delmanowski <drybjed@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: virt_pool
+author: "Maciej Delmanowski (@drybjed)"
+version_added: "2.0"
+short_description: Manage libvirt storage pools
+description:
+ - Manage I(libvirt) storage pools.
+options:
+ name:
+ required: false
+ aliases: [ "pool" ]
+ description:
+ - name of the storage pool being managed. Note that pool must be previously
+ defined with xml.
+ state:
+ required: false
+ choices: [ "active", "inactive", "present", "absent", "undefined", "deleted" ]
+ description:
+ - specify which state you want a storage pool to be in.
+ If 'active', pool will be started.
+ If 'present', ensure that pool is present but do not change its
+ state; if it's missing, you need to specify xml argument.
+ If 'inactive', pool will be stopped.
+ If 'undefined' or 'absent', pool will be removed from I(libvirt) configuration.
+ If 'deleted', pool contents will be deleted and then pool undefined.
+ command:
+ required: false
+ choices: [ "define", "build", "create", "start", "stop", "destroy",
+ "delete", "undefine", "get_xml", "list_pools", "facts",
+ "info", "status" ]
+ description:
+ - in addition to state management, various non-idempotent commands are available.
+ See examples.
+ autostart:
+ required: false
+ choices: ["yes", "no"]
+ description:
+ - Specify if a given storage pool should be started automatically on system boot.
+ uri:
+ required: false
+ default: "qemu:///system"
+ description:
+ - I(libvirt) connection uri.
+ xml:
+ required: false
+ description:
+ - XML document used with the define command.
+ mode:
+ required: false
+ choices: [ 'new', 'repair', 'resize', 'no_overwrite', 'overwrite', 'normal', 'zeroed' ]
+ description:
+ - Pass additional parameters to 'build' or 'delete' commands.
+requirements:
+ - "python >= 2.6"
+ - "python-libvirt"
+ - "python-lxml"
+'''
+
+EXAMPLES = '''
+# Define a new storage pool
+- virt_pool:
+ command: define
+ name: vms
+ xml: '{{ lookup("template", "pool/dir.xml.j2") }}'
+
+# Build a storage pool if it does not exist
+- virt_pool:
+ command: build
+ name: vms
+
+# Start a storage pool
+- virt_pool:
+ command: create
+ name: vms
+
+# List available pools
+- virt_pool:
+ command: list_pools
+
+# Get XML data of a specified pool
+- virt_pool:
+ command: get_xml
+ name: vms
+
+# Stop a storage pool
+- virt_pool:
+ command: destroy
+ name: vms
+
+# Delete a storage pool (destroys contents)
+- virt_pool:
+ command: delete
+ name: vms
+
+# Undefine a storage pool
+- virt_pool:
+ command: undefine
+ name: vms
+
+# Gather facts about storage pools
+# Facts will be available as 'ansible_libvirt_pools'
+- virt_pool:
+ command: facts
+
+# Gather information about pools managed by 'libvirt' remotely using uri
+- virt_pool:
+ command: info
+ uri: '{{ item }}'
+ with_items: '{{ libvirt_uris }}'
+ register: storage_pools
+
+# Ensure that a pool is active (needs to be defined and built first)
+- virt_pool:
+ state: active
+ name: vms
+
+# Ensure that a pool is inactive
+- virt_pool:
+ state: inactive
+ name: vms
+
+# Ensure that a given pool will be started at boot
+- virt_pool:
+ autostart: yes
+ name: vms
+
+# Disable autostart for a given pool
+- virt_pool:
+ autostart: no
+ name: vms
+'''
+
+VIRT_FAILED = 1
+VIRT_SUCCESS = 0
+VIRT_UNAVAILABLE=2
+
+try:
+ import libvirt
+except ImportError:
+ HAS_VIRT = False
+else:
+ HAS_VIRT = True
+
+try:
+ from lxml import etree
+except ImportError:
+ HAS_XML = False
+else:
+ HAS_XML = True
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+ALL_COMMANDS = []
+ENTRY_COMMANDS = ['create', 'status', 'start', 'stop', 'build', 'delete',
+ 'undefine', 'destroy', 'get_xml', 'define', 'refresh']
+HOST_COMMANDS = [ 'list_pools', 'facts', 'info' ]
+ALL_COMMANDS.extend(ENTRY_COMMANDS)
+ALL_COMMANDS.extend(HOST_COMMANDS)
+
+ENTRY_STATE_ACTIVE_MAP = {
+ 0 : "inactive",
+ 1 : "active"
+}
+
+ENTRY_STATE_AUTOSTART_MAP = {
+ 0 : "no",
+ 1 : "yes"
+}
+
+ENTRY_STATE_PERSISTENT_MAP = {
+ 0 : "no",
+ 1 : "yes"
+}
+
+ENTRY_STATE_INFO_MAP = {
+ 0 : "inactive",
+ 1 : "building",
+ 2 : "running",
+ 3 : "degraded",
+ 4 : "inaccessible"
+}
+
+ENTRY_BUILD_FLAGS_MAP = {
+ "new" : 0,
+ "repair" : 1,
+ "resize" : 2,
+ "no_overwrite" : 4,
+ "overwrite" : 8
+}
+
+ENTRY_DELETE_FLAGS_MAP = {
+ "normal" : 0,
+ "zeroed" : 1
+}
+
+ALL_MODES = []
+ALL_MODES.extend(ENTRY_BUILD_FLAGS_MAP.keys())
+ALL_MODES.extend(ENTRY_DELETE_FLAGS_MAP.keys())
+
+
+class EntryNotFound(Exception):
+    """Raised when a named libvirt storage pool cannot be found."""
+    pass
+
+
+class LibvirtConnection(object):
+    """Wrapper around a libvirt connection that exposes storage pool
+    operations with check-mode support: in check mode the mutating methods
+    call module.exit_json(changed=True) instead of acting on libvirt."""
+
+    def __init__(self, uri, module):
+        """Open a libvirt connection to ``uri``; raise on failure."""
+
+        self.module = module
+
+        conn = libvirt.open(uri)
+
+        if not conn:
+            raise Exception("hypervisor connection failure")
+
+        self.conn = conn
+
+    def find_entry(self, entryid):
+        """Return the storage pool named ``entryid``; ``-1`` returns the
+        list of all pools (active and inactive).  Raises EntryNotFound
+        when no pool matches."""
+        # entryid = -1 returns a list of everything
+
+        results = []
+
+        # Get active entries
+        for name in self.conn.listStoragePools():
+            entry = self.conn.storagePoolLookupByName(name)
+            results.append(entry)
+
+        # Get inactive entries
+        for name in self.conn.listDefinedStoragePools():
+            entry = self.conn.storagePoolLookupByName(name)
+            results.append(entry)
+
+        if entryid == -1:
+            return results
+
+        for entry in results:
+            if entry.name() == entryid:
+                return entry
+
+        raise EntryNotFound("storage pool %s not found" % entryid)
+
+    def create(self, entryid):
+        """Start the pool; in check mode just report a change when the
+        pool is missing or inactive."""
+        if not self.module.check_mode:
+            return self.find_entry(entryid).create()
+        else:
+            try:
+                state = self.find_entry(entryid).isActive()
+            except:
+                return self.module.exit_json(changed=True)
+            if not state:
+                return self.module.exit_json(changed=True)
+
+    def destroy(self, entryid):
+        """Stop the pool; in check mode report a change if it is active."""
+        if not self.module.check_mode:
+            return self.find_entry(entryid).destroy()
+        else:
+            if self.find_entry(entryid).isActive():
+                return self.module.exit_json(changed=True)
+
+    def undefine(self, entryid):
+        """Remove the pool definition; in check mode report a change if
+        the pool cannot be found."""
+        if not self.module.check_mode:
+            return self.find_entry(entryid).undefine()
+        else:
+            if not self.find_entry(entryid):
+                return self.module.exit_json(changed=True)
+
+    def get_status2(self, entry):
+        """Map an already-resolved pool object to 'active'/'inactive'."""
+        state = entry.isActive()
+        return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+
+    def get_status(self, entryid):
+        """Return 'active'/'inactive' for the named pool."""
+        if not self.module.check_mode:
+            state = self.find_entry(entryid).isActive()
+            return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+        else:
+            try:
+                state = self.find_entry(entryid).isActive()
+                return ENTRY_STATE_ACTIVE_MAP.get(state,"unknown")
+            except:
+                # NOTE(review): the map is keyed by int, so this string
+                # lookup always misses and yields "unknown" -- presumably
+                # "inactive" was intended for a missing pool; confirm.
+                return ENTRY_STATE_ACTIVE_MAP.get("inactive","unknown")
+
+    def get_uuid(self, entryid):
+        """Return the pool's UUID string."""
+        return self.find_entry(entryid).UUIDString()
+
+    def get_xml(self, entryid):
+        """Return the pool's XML description."""
+        return self.find_entry(entryid).XMLDesc(0)
+
+    def get_info(self, entryid):
+        """Return libvirt's pool info tuple (used as state/size values by
+        the facts code)."""
+        return self.find_entry(entryid).info()
+
+    def get_volume_count(self, entryid):
+        """Return the number of volumes in the pool."""
+        return self.find_entry(entryid).numOfVolumes()
+
+    def get_volume_names(self, entryid):
+        """Return the names of the volumes in the pool."""
+        return self.find_entry(entryid).listVolumes()
+
+    def get_devices(self, entryid):
+        """Return the source device paths from the pool XML; raises
+        ValueError when none are defined."""
+        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+        if xml.xpath('/pool/source/device'):
+            result = []
+            for device in xml.xpath('/pool/source/device'):
+                result.append(device.get('path'))
+        # NOTE(review): when no <device> elements exist, 'result' is never
+        # bound, so 'return result' raises NameError and the bare except
+        # converts it into the ValueError below -- fragile but functional.
+        try:
+            return result
+        except:
+            raise ValueError('No devices specified')
+
+    def get_format(self, entryid):
+        """Return the pool source format type; raises ValueError if absent."""
+        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+        try:
+            result = xml.xpath('/pool/source/format')[0].get('type')
+        except:
+            raise ValueError('Format not specified')
+        return result
+
+    def get_host(self, entryid):
+        """Return the source host name; raises ValueError if absent."""
+        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+        try:
+            result = xml.xpath('/pool/source/host')[0].get('name')
+        except:
+            raise ValueError('Host not specified')
+        return result
+
+    def get_source_path(self, entryid):
+        """Return the source directory path; raises ValueError if absent."""
+        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+        try:
+            result = xml.xpath('/pool/source/dir')[0].get('path')
+        except:
+            raise ValueError('Source path not specified')
+        return result
+
+    def get_path(self, entryid):
+        """Return the pool target path."""
+        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+        return xml.xpath('/pool/target/path')[0].text
+
+    def get_type(self, entryid):
+        """Return the pool type attribute from the XML root element."""
+        xml = etree.fromstring(self.find_entry(entryid).XMLDesc(0))
+        return xml.get('type')
+
+    def build(self, entryid, flags):
+        """Build the pool with the given flags; in check mode report a
+        change when the pool does not exist."""
+        if not self.module.check_mode:
+            return self.find_entry(entryid).build(flags)
+        else:
+            try:
+                state = self.find_entry(entryid)
+            except:
+                return self.module.exit_json(changed=True)
+            if not state:
+                return self.module.exit_json(changed=True)
+
+    def delete(self, entryid, flags):
+        """Delete the pool contents; in check mode report a change when
+        the pool exists."""
+        if not self.module.check_mode:
+            return self.find_entry(entryid).delete(flags)
+        else:
+            try:
+                state = self.find_entry(entryid)
+            except:
+                return self.module.exit_json(changed=True)
+            if state:
+                return self.module.exit_json(changed=True)
+
+    def get_autostart(self, entryid):
+        """Return the autostart flag mapped to "yes"/"no"."""
+        state = self.find_entry(entryid).autostart()
+        return ENTRY_STATE_AUTOSTART_MAP.get(state,"unknown")
+
+    def get_autostart2(self, entryid):
+        """Return the raw autostart flag; in check mode a missing pool
+        reports a change instead."""
+        if not self.module.check_mode:
+            return self.find_entry(entryid).autostart()
+        else:
+            try:
+                return self.find_entry(entryid).autostart()
+            except:
+                return self.module.exit_json(changed=True)
+
+    def set_autostart(self, entryid, val):
+        """Set the autostart flag; in check mode only report whether the
+        flag would change."""
+        if not self.module.check_mode:
+            return self.find_entry(entryid).setAutostart(val)
+        else:
+            try:
+                state = self.find_entry(entryid).autostart()
+            except:
+                return self.module.exit_json(changed=True)
+            if bool(state) != val:
+                return self.module.exit_json(changed=True)
+
+    def refresh(self, entryid):
+        """Rescan the pool contents (not check-mode aware)."""
+        return self.find_entry(entryid).refresh()
+
+    def get_persistent(self, entryid):
+        """Return "yes"/"no" for whether the pool is persistent."""
+        state = self.find_entry(entryid).isPersistent()
+        return ENTRY_STATE_PERSISTENT_MAP.get(state,"unknown")
+
+    def define_from_xml(self, entryid, xml):
+        """Define a new pool from XML; in check mode report a change when
+        no pool with that name exists."""
+        if not self.module.check_mode:
+            return self.conn.storagePoolDefineXML(xml)
+        else:
+            try:
+                self.find_entry(entryid)
+            except:
+                return self.module.exit_json(changed=True)
+
+
+class VirtStoragePool(object):
+    """High-level storage pool operations used by core(); a thin facade
+    over LibvirtConnection keyed by pool name."""
+
+    def __init__(self, uri, module):
+        self.module = module
+        self.uri = uri
+        self.conn = LibvirtConnection(self.uri, self.module)
+
+    def get_pool(self, entryid):
+        """Return the raw libvirt pool object for ``entryid``."""
+        return self.conn.find_entry(entryid)
+
+    def list_pools(self, state=None):
+        """Return pool names, optionally restricted to those in ``state``."""
+        results = []
+        for entry in self.conn.find_entry(-1):
+            if state:
+                if state == self.conn.get_status2(entry):
+                    results.append(entry.name())
+            else:
+                results.append(entry.name())
+        return results
+
+    def state(self):
+        """Return '<name> <state>' strings for every known pool."""
+        results = []
+        for entry in self.list_pools():
+            state_blurb = self.conn.get_status(entry)
+            results.append("%s %s" % (entry,state_blurb))
+        return results
+
+    def autostart(self, entryid):
+        """Enable autostart for the pool."""
+        return self.conn.set_autostart(entryid, True)
+
+    def get_autostart(self, entryid):
+        """Return the autostart flag of the pool."""
+        return self.conn.get_autostart2(entryid)
+
+    def set_autostart(self, entryid, state):
+        """Set the autostart flag of the pool to ``state``."""
+        return self.conn.set_autostart(entryid, state)
+
+    def create(self, entryid):
+        """Start the pool."""
+        return self.conn.create(entryid)
+
+    def start(self, entryid):
+        """Alias of create(): start the pool."""
+        return self.conn.create(entryid)
+
+    def stop(self, entryid):
+        """Alias of destroy(): stop the pool."""
+        return self.conn.destroy(entryid)
+
+    def destroy(self, entryid):
+        """Stop the pool."""
+        return self.conn.destroy(entryid)
+
+    def undefine(self, entryid):
+        """Remove the pool definition."""
+        return self.conn.undefine(entryid)
+
+    def status(self, entryid):
+        """Return 'active'/'inactive'/'unknown' for the pool."""
+        return self.conn.get_status(entryid)
+
+    def get_xml(self, entryid):
+        """Return the pool's XML description."""
+        return self.conn.get_xml(entryid)
+
+    def define(self, entryid, xml):
+        """Define the pool from an XML document."""
+        return self.conn.define_from_xml(entryid, xml)
+
+    def build(self, entryid, flags):
+        """Build the pool, translating the textual mode to libvirt flags."""
+        return self.conn.build(entryid, ENTRY_BUILD_FLAGS_MAP.get(flags,0))
+
+    def delete(self, entryid, flags):
+        """Delete the pool contents, translating the mode to libvirt flags."""
+        return self.conn.delete(entryid, ENTRY_DELETE_FLAGS_MAP.get(flags,0))
+
+    def refresh(self, entryid):
+        """Rescan the pool contents."""
+        return self.conn.refresh(entryid)
+
+    def info(self):
+        """facts() variant that nests the results under 'pools'."""
+        return self.facts(facts_mode='info')
+
+    def facts(self, facts_mode='facts'):
+        """Collect per-pool details for 'facts' or 'info' output."""
+        results = dict()
+        for entry in self.list_pools():
+            results[entry] = dict()
+            if self.conn.find_entry(entry):
+                data = self.conn.get_info(entry)
+                # libvirt returns maxMem, memory, and cpuTime as long()'s, which
+                # xmlrpclib tries to convert to regular int's during serialization.
+                # This throws exceptions, so convert them to strings here and
+                # assume the other end of the xmlrpc connection can figure things
+                # out or doesn't care.
+                results[entry] = {
+                    "status" : ENTRY_STATE_INFO_MAP.get(data[0],"unknown"),
+                    "size_total" : str(data[1]),
+                    "size_used" : str(data[2]),
+                    "size_available" : str(data[3]),
+                }
+                results[entry]["autostart"] = self.conn.get_autostart(entry)
+                results[entry]["persistent"] = self.conn.get_persistent(entry)
+                results[entry]["state"] = self.conn.get_status(entry)
+                results[entry]["path"] = self.conn.get_path(entry)
+                results[entry]["type"] = self.conn.get_type(entry)
+                results[entry]["uuid"] = self.conn.get_uuid(entry)
+                # Volume listing is only possible while the pool is active.
+                if self.conn.find_entry(entry).isActive():
+                    results[entry]["volume_count"] = self.conn.get_volume_count(entry)
+                    results[entry]["volumes"] = list()
+                    for volume in self.conn.get_volume_names(entry):
+                        results[entry]["volumes"].append(volume)
+                else:
+                    results[entry]["volume_count"] = -1
+
+                # The following attributes are optional in the pool XML.
+                try:
+                    results[entry]["host"] = self.conn.get_host(entry)
+                except ValueError:
+                    pass
+
+                try:
+                    results[entry]["source_path"] = self.conn.get_source_path(entry)
+                except ValueError:
+                    pass
+
+                try:
+                    results[entry]["format"] = self.conn.get_format(entry)
+                except ValueError:
+                    pass
+
+                try:
+                    devices = self.conn.get_devices(entry)
+                    results[entry]["devices"] = devices
+                except ValueError:
+                    pass
+
+            else:
+                results[entry]["state"] = self.conn.get_status(entry)
+
+        facts = dict()
+        if facts_mode == 'facts':
+            facts["ansible_facts"] = dict()
+            facts["ansible_facts"]["ansible_libvirt_pools"] = results
+        elif facts_mode == 'info':
+            facts['pools'] = results
+        return facts
+
+
+def core(module):
+
+ state = module.params.get('state', None)
+ name = module.params.get('name', None)
+ command = module.params.get('command', None)
+ uri = module.params.get('uri', None)
+ xml = module.params.get('xml', None)
+ autostart = module.params.get('autostart', None)
+ mode = module.params.get('mode', None)
+
+ v = VirtStoragePool(uri, module)
+ res = {}
+
+ if state and command == 'list_pools':
+ res = v.list_pools(state=state)
+ if not isinstance(res, dict):
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ if state:
+ if not name:
+ module.fail_json(msg = "state change requires a specified name")
+
+ res['changed'] = False
+ if state in [ 'active' ]:
+ if v.status(name) is not 'active':
+ res['changed'] = True
+ res['msg'] = v.start(name)
+ elif state in [ 'present' ]:
+ try:
+ v.get_pool(name)
+ except EntryNotFound:
+ if not xml:
+ module.fail_json(msg = "storage pool '" + name + "' not present, but xml not specified")
+ v.define(name, xml)
+ res = {'changed': True, 'created': name}
+ elif state in [ 'inactive' ]:
+ entries = v.list_pools()
+ if name in entries:
+ if v.status(name) is not 'inactive':
+ res['changed'] = True
+ res['msg'] = v.destroy(name)
+ elif state in [ 'undefined', 'absent' ]:
+ entries = v.list_pools()
+ if name in entries:
+ if v.status(name) is not 'inactive':
+ v.destroy(name)
+ res['changed'] = True
+ res['msg'] = v.undefine(name)
+ elif state in [ 'deleted' ]:
+ entries = v.list_pools()
+ if name in entries:
+ if v.status(name) is not 'inactive':
+ v.destroy(name)
+ v.delete(name, mode)
+ res['changed'] = True
+ res['msg'] = v.undefine(name)
+ else:
+ module.fail_json(msg="unexpected state")
+
+ return VIRT_SUCCESS, res
+
+ if command:
+ if command in ENTRY_COMMANDS:
+ if not name:
+ module.fail_json(msg = "%s requires 1 argument: name" % command)
+ if command == 'define':
+ if not xml:
+ module.fail_json(msg = "define requires xml argument")
+ try:
+ v.get_pool(name)
+ except EntryNotFound:
+ v.define(name, xml)
+ res = {'changed': True, 'created': name}
+ return VIRT_SUCCESS, res
+ elif command == 'build':
+ res = v.build(name, mode)
+ if not isinstance(res, dict):
+ res = { 'changed': True, command: res }
+ return VIRT_SUCCESS, res
+ elif command == 'delete':
+ res = v.delete(name, mode)
+ if not isinstance(res, dict):
+ res = { 'changed': True, command: res }
+ return VIRT_SUCCESS, res
+ res = getattr(v, command)(name)
+ if not isinstance(res, dict):
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ elif hasattr(v, command):
+ res = getattr(v, command)()
+ if not isinstance(res, dict):
+ res = { command: res }
+ return VIRT_SUCCESS, res
+
+ else:
+ module.fail_json(msg="Command %s not recognized" % command)
+
+ if autostart is not None:
+ if not name:
+ module.fail_json(msg = "state change requires a specified name")
+
+ res['changed'] = False
+ if autostart:
+ if not v.get_autostart(name):
+ res['changed'] = True
+ res['msg'] = v.set_autostart(name, True)
+ else:
+ if v.get_autostart(name):
+ res['changed'] = True
+ res['msg'] = v.set_autostart(name, False)
+
+ return VIRT_SUCCESS, res
+
+ module.fail_json(msg="expected state or command parameter to be specified")
+
+
+def main():
+
+ module = AnsibleModule (
+ argument_spec = dict(
+ name = dict(aliases=['pool']),
+ state = dict(choices=['active', 'inactive', 'present', 'absent', 'undefined', 'deleted']),
+ command = dict(choices=ALL_COMMANDS),
+ uri = dict(default='qemu:///system'),
+ xml = dict(),
+ autostart = dict(type='bool'),
+ mode = dict(choices=ALL_MODES),
+ ),
+ supports_check_mode = True
+ )
+
+ if not HAS_VIRT:
+ module.fail_json(
+ msg='The `libvirt` module is not importable. Check the requirements.'
+ )
+
+ if not HAS_XML:
+ module.fail_json(
+ msg='The `lxml` module is not importable. Check the requirements.'
+ )
+
+ rc = VIRT_SUCCESS
+ try:
+ rc, result = core(module)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+ if rc != 0: # something went wrong emit the msg
+ module.fail_json(rc=rc, msg=result)
+ else:
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/openstack/os_flavor_facts.py b/lib/ansible/modules/cloud/openstack/os_flavor_facts.py
new file mode 100644
index 0000000000..c6e938b63b
--- /dev/null
+++ b/lib/ansible/modules/cloud/openstack/os_flavor_facts.py
@@ -0,0 +1,249 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015 IBM
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+from distutils.version import StrictVersion
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_flavor_facts
+short_description: Retrieve facts about one or more flavors
+author: "David Shrewsbury (@Shrews)"
+version_added: "2.1"
+description:
+ - Retrieve facts about available OpenStack instance flavors. By default,
+ facts about ALL flavors are retrieved. Filters can be applied to get
+ facts for only matching flavors. For example, you can filter on the
+ amount of RAM available to the flavor, or the number of virtual CPUs
+ available to the flavor, or both. When specifying multiple filters,
+ *ALL* filters must match on a flavor before that flavor is returned as
+ a fact.
+notes:
+ - This module creates a new top-level C(openstack_flavors) fact, which
+ contains a list of unsorted flavors.
+requirements:
+ - "python >= 2.6"
+ - "shade"
+options:
+ name:
+ description:
+ - A flavor name. Cannot be used with I(ram) or I(vcpus) or I(ephemeral).
+ required: false
+ default: None
+ ram:
+ description:
+ - "A string used for filtering flavors based on the amount of RAM
+ (in MB) desired. This string accepts the following special values:
+ 'MIN' (return flavors with the minimum amount of RAM), and 'MAX'
+ (return flavors with the maximum amount of RAM)."
+
+ - "A specific amount of RAM may also be specified. Any flavors with this
+ exact amount of RAM will be returned."
+
+ - "A range of acceptable RAM may be given using a special syntax. Simply
+ prefix the amount of RAM with one of these acceptable range values:
+ '<', '>', '<=', '>='. These values represent less than, greater than,
+ less than or equal to, and greater than or equal to, respectively."
+ required: false
+ default: false
+ vcpus:
+ description:
+ - A string used for filtering flavors based on the number of virtual
+ CPUs desired. Format is the same as the I(ram) parameter.
+ required: false
+ default: false
+ limit:
+ description:
+ - Limits the number of flavors returned. All matching flavors are
+ returned by default.
+ required: false
+ default: None
+ ephemeral:
+ description:
+ - A string used for filtering flavors based on the amount of ephemeral
+ storage. Format is the same as the I(ram) parameter
+ required: false
+ default: false
+ version_added: "2.3"
+extends_documentation_fragment: openstack
+'''
+
+EXAMPLES = '''
+# Gather facts about all available flavors
+- os_flavor_facts:
+ cloud: mycloud
+
+# Gather facts for the flavor named "xlarge-flavor"
+- os_flavor_facts:
+ cloud: mycloud
+ name: "xlarge-flavor"
+
+# Get all flavors that have exactly 512 MB of RAM.
+- os_flavor_facts:
+ cloud: mycloud
+ ram: "512"
+
+# Get all flavors that have 1024 MB or more of RAM.
+- os_flavor_facts:
+ cloud: mycloud
+ ram: ">=1024"
+
+# Get a single flavor that has the minimum amount of RAM. Using the 'limit'
+# option will guarantee only a single flavor is returned.
+- os_flavor_facts:
+ cloud: mycloud
+ ram: "MIN"
+ limit: 1
+
+# Get all flavors with 1024 MB of RAM or more, AND exactly 2 virtual CPUs.
+- os_flavor_facts:
+ cloud: mycloud
+ ram: ">=1024"
+ vcpus: "2"
+
+# Get all flavors with 1024 MB of RAM or more, exactly 2 virtual CPUs, and
+# less than 30gb of ephemeral storage.
+- os_flavor_facts:
+ cloud: mycloud
+ ram: ">=1024"
+ vcpus: "2"
+ ephemeral: "<30"
+'''
+
+
+RETURN = '''
+openstack_flavors:
+ description: Dictionary describing the flavors.
+ returned: On success.
+ type: dictionary
+ contains:
+ id:
+ description: Flavor ID.
+ returned: success
+ type: string
+ sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
+ name:
+ description: Flavor name.
+ returned: success
+ type: string
+ sample: "tiny"
+ disk:
+ description: Size of local disk, in GB.
+ returned: success
+ type: int
+ sample: 10
+ ephemeral:
+ description: Ephemeral space size, in GB.
+ returned: success
+ type: int
+ sample: 10
+ ram:
+ description: Amount of memory, in MB.
+ returned: success
+ type: int
+ sample: 1024
+ swap:
+ description: Swap space size, in MB.
+ returned: success
+ type: int
+ sample: 100
+ vcpus:
+ description: Number of virtual CPUs.
+ returned: success
+ type: int
+ sample: 2
+ is_public:
+ description: Make flavor accessible to the public.
+ returned: success
+ type: bool
+ sample: true
+'''
+
+
+def main():
+ # Common OpenStack arguments plus this module's filters. 'name' is
+ # mutually exclusive with the attribute filters below.
+ argument_spec = openstack_full_argument_spec(
+ name=dict(required=False, default=None),
+ ram=dict(required=False, default=None),
+ vcpus=dict(required=False, default=None),
+ limit=dict(required=False, default=None, type='int'),
+ ephemeral=dict(required=False, default=None),
+ )
+ module_kwargs = openstack_module_kwargs(
+ mutually_exclusive=[
+ ['name', 'ram'],
+ ['name', 'vcpus'],
+ ['name', 'ephemeral']
+ ]
+ )
+ module = AnsibleModule(argument_spec, **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ name = module.params['name']
+ vcpus = module.params['vcpus']
+ ram = module.params['ram']
+ ephemeral = module.params['ephemeral']
+ limit = module.params['limit']
+
+ try:
+ cloud = shade.openstack_cloud(**module.params)
+ if name:
+ # Exact-name lookup; the attribute filters do not apply here.
+ flavors = cloud.search_flavors(filters={'name': name})
+
+ else:
+ flavors = cloud.list_flavors()
+ # Collect only the filters the user actually supplied.
+ filters = {}
+ if vcpus:
+ filters['vcpus'] = vcpus
+ if ram:
+ filters['ram'] = ram
+ if ephemeral:
+ filters['ephemeral'] = ephemeral
+ if filters:
+ # Range search added in 1.5.0
+ if StrictVersion(shade.__version__) < StrictVersion('1.5.0'):
+ module.fail_json(msg="Shade >= 1.5.0 needed for this functionality")
+ flavors = cloud.range_search(flavors, filters)
+
+ # 'limit' caps the number of returned flavors in either branch.
+ if limit is not None:
+ flavors = flavors[:limit]
+
+ module.exit_json(changed=False,
+ ansible_facts=dict(openstack_flavors=flavors))
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/openstack/os_group.py b/lib/ansible/modules/cloud/openstack/os_group.py
new file mode 100644
index 0000000000..2347efb483
--- /dev/null
+++ b/lib/ansible/modules/cloud/openstack/os_group.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# Copyright (c) 2016 IBM
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_group
+short_description: Manage OpenStack Identity Groups
+extends_documentation_fragment: openstack
+version_added: "2.1"
+author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)"
+description:
+ - Manage OpenStack Identity Groups. Groups can be created, deleted or
+ updated. Only the I(description) value can be updated.
+options:
+ name:
+ description:
+ - Group name
+ required: true
+ description:
+ description:
+ - Group description
+ required: false
+ default: None
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Create a group named "demo"
+- os_group:
+ cloud: mycloud
+ state: present
+ name: demo
+ description: "Demo Group"
+
+# Update the description on existing "demo" group
+- os_group:
+ cloud: mycloud
+ state: present
+ name: demo
+ description: "Something else"
+
+# Delete group named "demo"
+- os_group:
+ cloud: mycloud
+ state: absent
+ name: demo
+'''
+
+RETURN = '''
+group:
+ description: Dictionary describing the group.
+ returned: On success when I(state) is 'present'.
+ type: dictionary
+ contains:
+ id:
+ description: Unique group ID
+ type: string
+ sample: "ee6156ff04c645f481a6738311aea0b0"
+ name:
+ description: Group name
+ type: string
+ sample: "demo"
+ description:
+ description: Group description
+ type: string
+ sample: "Demo Group"
+ domain_id:
+ description: Domain for the group
+ type: string
+ sample: "default"
+'''
+
+
+def _system_state_change(state, description, group):
+ if state == 'present' and not group:
+ return True
+ if state == 'present' and description is not None and group.description != description:
+ return True
+ if state == 'absent' and group:
+ return True
+ return False
+
+
+def main():
+ # Common OpenStack args plus the group-specific ones.
+ argument_spec = openstack_full_argument_spec(
+ name=dict(required=True),
+ description=dict(required=False, default=None),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+
+ module_kwargs = openstack_module_kwargs()
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True,
+ **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ # Pop module-specific params so the remainder can be handed to shade.
+ name = module.params.pop('name')
+ description = module.params.pop('description')
+ state = module.params.pop('state')
+
+ try:
+ cloud = shade.operator_cloud(**module.params)
+ group = cloud.get_group(name)
+
+ if module.check_mode:
+ # Report whether a change would occur without applying it.
+ module.exit_json(changed=_system_state_change(state, description, group))
+
+ if state == 'present':
+ if group is None:
+ group = cloud.create_group(
+ name=name, description=description)
+ changed = True
+ else:
+ # Only the description can be updated in place.
+ if description is not None and group.description != description:
+ group = cloud.update_group(
+ group.id, description=description)
+ changed = True
+ else:
+ changed = False
+ module.exit_json(changed=changed, group=group)
+
+ elif state == 'absent':
+ if group is None:
+ changed=False
+ else:
+ cloud.delete_group(group.id)
+ changed=True
+ module.exit_json(changed=changed)
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/openstack/os_ironic_inspect.py b/lib/ansible/modules/cloud/openstack/os_ironic_inspect.py
new file mode 100644
index 0000000000..b436f7f042
--- /dev/null
+++ b/lib/ansible/modules/cloud/openstack/os_ironic_inspect.py
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# coding: utf-8 -*-
+
+# (c) 2015-2016, Hewlett Packard Enterprise Development Company LP
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+from distutils.version import StrictVersion
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_ironic_inspect
+short_description: Explicitly triggers baremetal node introspection in ironic.
+extends_documentation_fragment: openstack
+author: "Julia Kreger (@juliakreger)"
+version_added: "2.1"
+description:
+ - Requests Ironic to set a node into inspect state in order to collect metadata regarding the node.
+ This command may be out of band or in-band depending on the ironic driver configuration.
+ This is only possible on nodes in 'manageable' and 'available' state.
+options:
+ mac:
+ description:
+ - unique mac address that is used to attempt to identify the host.
+ required: false
+ default: None
+ uuid:
+ description:
+ - globally unique identifier (UUID) to identify the host.
+ required: false
+ default: None
+ name:
+ description:
+ - unique name identifier to identify the host in Ironic.
+ required: false
+ default: None
+ ironic_url:
+ description:
+ - If noauth mode is utilized, this is required to be set to the endpoint URL for the Ironic API.
+ Use with "auth" and "auth_type" settings set to None.
+ required: false
+ default: None
+ timeout:
+ description:
+ - A timeout in seconds to tell the role to wait for the node to complete introspection if wait is set to True.
+ required: false
+ default: 1200
+
+requirements: ["shade"]
+'''
+
+RETURN = '''
+ansible_facts:
+ description: Dictionary of new facts representing discovered properties of the node.
+ returned: changed
+ type: dictionary
+ contains:
+ memory_mb:
+ description: Amount of node memory as updated in the node properties
+ type: string
+ sample: "1024"
+ cpu_arch:
+ description: Detected CPU architecture type
+ type: string
+ sample: "x86_64"
+ local_gb:
+ description: Total size of local disk storage as updated in node properties.
+ type: string
+ sample: "10"
+ cpus:
+ description: Count of cpu cores defined in the updated node properties.
+ type: string
+ sample: "1"
+'''
+
+EXAMPLES = '''
+# Invoke node inspection
+- os_ironic_inspect:
+ name: "testnode1"
+'''
+
+
+def _choose_id_value(module):
+ if module.params['uuid']:
+ return module.params['uuid']
+ if module.params['name']:
+ return module.params['name']
+ return None
+
+
+def main():
+ argument_spec = openstack_full_argument_spec(
+ auth_type=dict(required=False),
+ uuid=dict(required=False),
+ name=dict(required=False),
+ mac=dict(required=False),
+ ironic_url=dict(required=False),
+ timeout=dict(default=1200, type='int', required=False),
+ )
+ module_kwargs = openstack_module_kwargs()
+ module = AnsibleModule(argument_spec, **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+ # inspect_machine() requires shade >= 1.0.0.
+ if StrictVersion(shade.__version__) < StrictVersion('1.0.0'):
+ module.fail_json(msg="To utilize this module, the installed version of"
+ "the shade library MUST be >=1.0.0")
+
+ # Noauth mode requires an explicit Ironic endpoint.
+ if (module.params['auth_type'] in [None, 'None'] and
+ module.params['ironic_url'] is None):
+ module.fail_json(msg="Authentication appears to be disabled, "
+ "Please define an ironic_url parameter")
+
+ if (module.params['ironic_url'] and
+ module.params['auth_type'] in [None, 'None']):
+ module.params['auth'] = dict(
+ endpoint=module.params['ironic_url']
+ )
+
+ try:
+ cloud = shade.operator_cloud(**module.params)
+
+ # Resolve the machine by uuid/name first, then by MAC address.
+ if module.params['name'] or module.params['uuid']:
+ server = cloud.get_machine(_choose_id_value(module))
+ elif module.params['mac']:
+ server = cloud.get_machine_by_mac(module.params['mac'])
+ else:
+ module.fail_json(msg="The worlds did not align, "
+ "the host was not found as "
+ "no name, uuid, or mac was "
+ "defined.")
+ if server:
+ # NOTE(review): 'wait' is not in the local argument_spec above;
+ # presumably supplied by openstack_full_argument_spec — confirm.
+ cloud.inspect_machine(server['uuid'], module.params['wait'])
+ # TODO(TheJulia): diff properties, ?and ports? and determine
+ # if a change occured. In theory, the node is always changed
+ # if introspection is able to update the record.
+ module.exit_json(changed=True,
+ ansible_facts=server['properties'])
+
+ else:
+ module.fail_json(msg="node not found.")
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/cloud/openstack/os_keystone_domain.py b/lib/ansible/modules/cloud/openstack/os_keystone_domain.py
new file mode 100644
index 0000000000..b355971e8b
--- /dev/null
+++ b/lib/ansible/modules/cloud/openstack/os_keystone_domain.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_keystone_domain
+short_description: Manage OpenStack Identity Domains
+extends_documentation_fragment: openstack
+version_added: "2.1"
+description:
+ - Create, update, or delete OpenStack Identity domains. If a domain
+ with the supplied name already exists, it will be updated with the
+ new description and enabled attributes.
+options:
+ name:
+ description:
+ - Name that has to be given to the instance
+ required: true
+ description:
+ description:
+ - Description of the domain
+ required: false
+ default: None
+ enabled:
+ description:
+ - Is the domain enabled
+ required: false
+ default: True
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Create a domain
+- os_keystone_domain:
+ cloud: mycloud
+ state: present
+ name: demo
+ description: Demo Domain
+
+# Delete a domain
+- os_keystone_domain:
+ cloud: mycloud
+ state: absent
+ name: demo
+'''
+
+RETURN = '''
+domain:
+ description: Dictionary describing the domain.
+ returned: On success when I(state) is 'present'
+ type: dictionary
+ contains:
+ id:
+ description: Domain ID.
+ type: string
+ sample: "474acfe5-be34-494c-b339-50f06aa143e4"
+ name:
+ description: Domain name.
+ type: string
+ sample: "demo"
+ description:
+ description: Domain description.
+ type: string
+ sample: "Demo Domain"
+ enabled:
+ description: Flag to indicate if the domain is enabled.
+ type: boolean
+ sample: True
+
+id:
+ description: The domain ID.
+ returned: On success when I(state) is 'present'
+ type: string
+ sample: "474acfe5-be34-494c-b339-50f06aa143e4"
+'''
+
+def _needs_update(module, domain):
+ if domain.description != module.params['description']:
+ return True
+ if domain.enabled != module.params['enabled']:
+ return True
+ return False
+
+def _system_state_change(module, domain):
+ state = module.params['state']
+ if state == 'absent' and domain:
+ return True
+
+ if state == 'present':
+ if domain is None:
+ return True
+ return _needs_update(module, domain)
+
+ return False
+
+def main():
+
+ argument_spec = openstack_full_argument_spec(
+ name=dict(required=True),
+ description=dict(default=None),
+ enabled=dict(default=True, type='bool'),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+
+ module_kwargs = openstack_module_kwargs()
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True,
+ **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ name = module.params['name']
+ description = module.params['description']
+ enabled = module.params['enabled']
+ state = module.params['state']
+
+ try:
+ cloud = shade.operator_cloud(**module.params)
+
+ # Search (rather than get) so that duplicate domain names can be
+ # detected and reported as an error.
+ domains = cloud.search_domains(filters=dict(name=name))
+
+ if len(domains) > 1:
+ module.fail_json(msg='Domain name %s is not unique' % name)
+ elif len(domains) == 1:
+ domain = domains[0]
+ else:
+ domain = None
+
+ if module.check_mode:
+ module.exit_json(changed=_system_state_change(module, domain))
+
+ if state == 'present':
+ if domain is None:
+ domain = cloud.create_domain(
+ name=name, description=description, enabled=enabled)
+ changed = True
+ else:
+ # Update only when description/enabled actually differ.
+ if _needs_update(module, domain):
+ domain = cloud.update_domain(
+ domain.id, name=name, description=description,
+ enabled=enabled)
+ changed = True
+ else:
+ changed = False
+ module.exit_json(changed=changed, domain=domain, id=domain.id)
+
+ elif state == 'absent':
+ if domain is None:
+ changed=False
+ else:
+ cloud.delete_domain(domain.id)
+ changed=True
+ module.exit_json(changed=changed)
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/openstack/os_keystone_domain_facts.py b/lib/ansible/modules/cloud/openstack/os_keystone_domain_facts.py
new file mode 100644
index 0000000000..9e36341521
--- /dev/null
+++ b/lib/ansible/modules/cloud/openstack/os_keystone_domain_facts.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_keystone_domain_facts
+short_description: Retrieve facts about one or more OpenStack domains
+extends_documentation_fragment: openstack
+version_added: "2.1"
+author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
+description:
+ - Retrieve facts about a one or more OpenStack domains
+requirements:
+ - "python >= 2.6"
+ - "shade"
+options:
+ name:
+ description:
+ - Name or ID of the domain
+ required: true
+ filters:
+ description:
+ - A dictionary of meta data to use for further filtering. Elements of
+ this dictionary may be additional dictionaries.
+ required: false
+ default: None
+'''
+
+EXAMPLES = '''
+# Gather facts about previously created domain
+- os_keystone_domain_facts:
+ cloud: awesomecloud
+- debug:
+ var: openstack_domains
+
+# Gather facts about a previously created domain by name
+- os_keystone_domain_facts:
+ cloud: awesomecloud
+ name: demodomain
+- debug:
+ var: openstack_domains
+
+# Gather facts about a previously created domain with filter
+- os_keystone_domain_facts:
+ cloud: awesomecloud
+ name: demodomain
+ filters:
+ enabled: False
+- debug:
+ var: openstack_domains
+'''
+
+
+RETURN = '''
+openstack_domains:
+ description: has all the OpenStack facts about domains
+ returned: always, but can be null
+ type: complex
+ contains:
+ id:
+ description: Unique UUID.
+ returned: success
+ type: string
+ name:
+ description: Name given to the domain.
+ returned: success
+ type: string
+ description:
+ description: Description of the domain.
+ returned: success
+ type: string
+ enabled:
+ description: Flag to indicate if the domain is enabled.
+ returned: success
+ type: bool
+'''
+
def main():
    """Gather facts about one or more Keystone domains.

    Exits with ``ansible_facts.openstack_domains`` set to the lookup
    result; fails with the shade error message on any cloud exception.
    """
    argument_spec = openstack_full_argument_spec(
        name=dict(required=False, default=None),
        filters=dict(required=False, type='dict', default=None),
    )
    module_kwargs = openstack_module_kwargs(
        mutually_exclusive=[
            ['name', 'filters'],
        ]
    )
    module = AnsibleModule(argument_spec, **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    try:
        name = module.params['name']
        filters = module.params['filters']

        opcloud = shade.operator_cloud(**module.params)

        if name:
            # 'name' may actually hold a domain ID: try a direct lookup
            # first and fall back to a name-based search.
            try:
                # BUG FIX: this previously called the undefined name
                # `cloud`, so the direct-lookup branch always raised
                # NameError and the fallback search always ran.
                domains = opcloud.get_domain(name)
            except Exception:
                # Narrowed from a bare `except:`; the fallback is still
                # deliberately best-effort for any lookup failure.
                domains = opcloud.search_domains(filters={'name': name})

        else:
            domains = opcloud.search_domains(filters)

        module.exit_json(changed=False, ansible_facts=dict(
            openstack_domains=domains))

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/openstack/os_keystone_role.py b/lib/ansible/modules/cloud/openstack/os_keystone_role.py
new file mode 100644
index 0000000000..db5b0027c0
--- /dev/null
+++ b/lib/ansible/modules/cloud/openstack/os_keystone_role.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# Copyright (c) 2016 IBM
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_keystone_role
+short_description: Manage OpenStack Identity Roles
+extends_documentation_fragment: openstack
+version_added: "2.1"
+author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)"
+description:
+ - Manage OpenStack Identity Roles.
+options:
+ name:
+ description:
+ - Role Name
+ required: true
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Create a role named "demo"
+- os_keystone_role:
+ cloud: mycloud
+ state: present
+ name: demo
+
+# Delete the role named "demo"
+- os_keystone_role:
+ cloud: mycloud
+ state: absent
+ name: demo
+'''
+
+RETURN = '''
+role:
+ description: Dictionary describing the role.
+ returned: On success when I(state) is 'present'.
+ type: dictionary
+ contains:
+ id:
+ description: Unique role ID.
+ type: string
+ sample: "677bfab34c844a01b88a217aa12ec4c2"
+ name:
+ description: Role name.
+ type: string
+ sample: "demo"
+'''
+
+
+def _system_state_change(state, role):
+ if state == 'present' and not role:
+ return True
+ if state == 'absent' and role:
+ return True
+ return False
+
+
+def main():
+ argument_spec = openstack_full_argument_spec(
+ name=dict(required=True),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+
+ module_kwargs = openstack_module_kwargs()
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True,
+ **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ # Pop module-specific params; the remainder configure the shade cloud.
+ name = module.params.pop('name')
+ state = module.params.pop('state')
+
+ try:
+ cloud = shade.operator_cloud(**module.params)
+
+ role = cloud.get_role(name)
+
+ if module.check_mode:
+ module.exit_json(changed=_system_state_change(state, role))
+
+ if state == 'present':
+ if role is None:
+ # Roles carry no mutable attributes, so 'present' is simply
+ # create-if-missing.
+ role = cloud.create_role(name)
+ changed = True
+ else:
+ changed = False
+ module.exit_json(changed=changed, role=role)
+ elif state == 'absent':
+ if role is None:
+ changed=False
+ else:
+ cloud.delete_role(name)
+ changed=True
+ module.exit_json(changed=changed)
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/openstack/os_keystone_service.py b/lib/ansible/modules/cloud/openstack/os_keystone_service.py
new file mode 100644
index 0000000000..d23f288162
--- /dev/null
+++ b/lib/ansible/modules/cloud/openstack/os_keystone_service.py
@@ -0,0 +1,214 @@
+#!/usr/bin/python
+# Copyright 2016 Sam Yaple
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+from distutils.version import StrictVersion
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_keystone_service
+short_description: Manage OpenStack Identity services
+extends_documentation_fragment: openstack
+author: "Sam Yaple (@SamYaple)"
+version_added: "2.2"
+description:
+ - Create, update, or delete OpenStack Identity service. If a service
+ with the supplied name already exists, it will be updated with the
+ new description and enabled attributes.
+options:
+ name:
+ description:
+ - Name of the service
+ required: true
+ description:
+ description:
+ - Description of the service
+ required: false
+ default: None
+ enabled:
+ description:
+ - Is the service enabled
+ required: false
+ default: True
+ service_type:
+ description:
+ - The type of service
+ required: true
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Create a service for glance
+- os_keystone_service:
+ cloud: mycloud
+ state: present
+ name: glance
+ service_type: image
+ description: OpenStack Image Service
+# Delete a service
+- os_keystone_service:
+ cloud: mycloud
+ state: absent
+ name: glance
+ service_type: image
+'''
+
+RETURN = '''
+service:
+ description: Dictionary describing the service.
+ returned: On success when I(state) is 'present'
+ type: dictionary
+ contains:
+ id:
+ description: Service ID.
+ type: string
+ sample: "3292f020780b4d5baf27ff7e1d224c44"
+ name:
+ description: Service name.
+ type: string
+ sample: "glance"
+ service_type:
+ description: Service type.
+ type: string
+ sample: "image"
+ description:
+ description: Service description.
+ type: string
+ sample: "OpenStack Image Service"
+ enabled:
+ description: Service status.
+ type: boolean
+ sample: True
+id:
+ description: The service ID.
+ returned: On success when I(state) is 'present'
+ type: string
+ sample: "3292f020780b4d5baf27ff7e1d224c44"
+'''
+
+
+def _needs_update(module, service):
+ if service.enabled != module.params['enabled']:
+ return True
+ if service.description is not None and \
+ service.description != module.params['description']:
+ return True
+ return False
+
+
def _system_state_change(module, service):
    """Return True when the requested state would require a cloud change."""
    state = module.params['state']

    if state == 'present':
        # Either the service is missing entirely or its attributes drifted.
        return service is None or _needs_update(module, service)

    if state == 'absent':
        return bool(service)

    return False
+
+
def main():
    """Module entry point: create, update, or delete a Keystone service."""
    argument_spec = openstack_full_argument_spec(
        description=dict(default=None),
        enabled=dict(default=True, type='bool'),
        name=dict(required=True),
        service_type=dict(required=True),
        state=dict(default='present', choices=['absent', 'present']),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    if StrictVersion(shade.__version__) < StrictVersion('1.6.0'):
        # Fixed the message concatenation: it previously rendered "ofthe".
        module.fail_json(msg="To utilize this module, the installed version of "
                             "the shade library MUST be >=1.6.0")

    description = module.params['description']
    enabled = module.params['enabled']
    name = module.params['name']
    state = module.params['state']
    service_type = module.params['service_type']

    try:
        cloud = shade.operator_cloud(**module.params)

        # Name + type must identify at most one service.
        services = cloud.search_services(name_or_id=name,
                                         filters=dict(type=service_type))

        if len(services) > 1:
            module.fail_json(msg='Service name %s and type %s are not unique' %
                             (name, service_type))
        elif len(services) == 1:
            service = services[0]
        else:
            service = None

        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, service))

        if state == 'present':
            if service is None:
                # BUG FIX: honour the user's "enabled" parameter instead of
                # hard-coding enabled=True on creation.
                service = cloud.create_service(name=name,
                    description=description, type=service_type,
                    enabled=enabled)
                changed = True
            else:
                if _needs_update(module, service):
                    service = cloud.update_service(
                        service.id, name=name, type=service_type, enabled=enabled,
                        description=description)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed, service=service, id=service.id)

        elif state == 'absent':
            if service is None:
                changed = False
            else:
                cloud.delete_service(service.id)
                changed = True
            module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/openstack/os_port_facts.py b/lib/ansible/modules/cloud/openstack/os_port_facts.py
new file mode 100644
index 0000000000..0da37d88ef
--- /dev/null
+++ b/lib/ansible/modules/cloud/openstack/os_port_facts.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+
+# Copyright (c) 2016 IBM
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+module: os_port_facts
+short_description: Retrieve facts about ports within OpenStack.
+version_added: "2.1"
+author: "David Shrewsbury (@Shrews)"
+description:
+ - Retrieve facts about ports from OpenStack.
+notes:
+ - Facts are placed in the C(openstack_ports) variable.
+requirements:
+ - "python >= 2.6"
+ - "shade"
+options:
+ port:
+ description:
+ - Unique name or ID of a port.
+ required: false
+ default: null
+ filters:
+ description:
+ - A dictionary of meta data to use for further filtering. Elements
+ of this dictionary will be matched against the returned port
+ dictionaries. Matching is currently limited to strings within
+ the port dictionary, or strings within nested dictionaries.
+ required: false
+ default: null
+extends_documentation_fragment: openstack
+'''
+
+EXAMPLES = '''
+# Gather facts about all ports
+- os_port_facts:
+ cloud: mycloud
+
+# Gather facts about a single port
+- os_port_facts:
+ cloud: mycloud
+ port: 6140317d-e676-31e1-8a4a-b1913814a471
+
+# Gather facts about all ports that have device_id set to a specific value
+# and with a status of ACTIVE.
+- os_port_facts:
+ cloud: mycloud
+ filters:
+ device_id: 1038a010-3a37-4a9d-82ea-652f1da36597
+ status: ACTIVE
+'''
+
+RETURN = '''
+openstack_ports:
+ description: List of port dictionaries. A subset of the dictionary keys
+ listed below may be returned, depending on your cloud provider.
+ returned: always, but can be null
+ type: complex
+ contains:
+ admin_state_up:
+ description: The administrative state of the router, which is
+ up (true) or down (false).
+ returned: success
+ type: boolean
+ sample: true
+ allowed_address_pairs:
+ description: A set of zero or more allowed address pairs. An
+ address pair consists of an IP address and MAC address.
+ returned: success
+ type: list
+ sample: []
+ "binding:host_id":
+ description: The UUID of the host where the port is allocated.
+ returned: success
+ type: string
+ sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
+ "binding:profile":
+ description: A dictionary that enables the application running on
+ the host to pass and receive VIF port-specific
+ information to the plug-in.
+ returned: success
+ type: dict
+ sample: {}
+ "binding:vif_details":
+ description: A dictionary that enables the application to pass
+ information about functions that the Networking API
+ provides.
+ returned: success
+ type: dict
+ sample: {"port_filter": true}
+ "binding:vif_type":
+ description: The VIF type for the port.
+ returned: success
+ type: string
+ sample: "ovs"
+ "binding:vnic_type":
+ description: The virtual network interface card (vNIC) type that is
+ bound to the neutron port.
+ returned: success
+ type: string
+ sample: "normal"
+ device_id:
+ description: The UUID of the device that uses this port.
+ returned: success
+ type: string
+ sample: "b4bd682d-234a-4091-aa5b-4b025a6a7759"
+ device_owner:
+ description: The UUID of the entity that uses this port.
+ returned: success
+ type: string
+ sample: "network:router_interface"
+ dns_assignment:
+ description: DNS assignment information.
+ returned: success
+ type: list
+ dns_name:
+ description: DNS name
+ returned: success
+ type: string
+ sample: ""
+ extra_dhcp_opts:
+ description: A set of zero or more extra DHCP option pairs.
+ An option pair consists of an option value and name.
+ returned: success
+ type: list
+ sample: []
+ fixed_ips:
+ description: The IP addresses for the port. Includes the IP address
+ and UUID of the subnet.
+ returned: success
+ type: list
+ id:
+ description: The UUID of the port.
+ returned: success
+ type: string
+ sample: "3ec25c97-7052-4ab8-a8ba-92faf84148de"
+ ip_address:
+ description: The IP address.
+ returned: success
+ type: string
+ sample: "127.0.0.1"
+ mac_address:
+ description: The MAC address.
+ returned: success
+ type: string
+ sample: "00:00:5E:00:53:42"
+ name:
+ description: The port name.
+ returned: success
+ type: string
+ sample: "port_name"
+ network_id:
+ description: The UUID of the attached network.
+ returned: success
+ type: string
+ sample: "dd1ede4f-3952-4131-aab6-3b8902268c7d"
+ port_security_enabled:
+ description: The port security status. The status is enabled (true) or disabled (false).
+ returned: success
+ type: boolean
+ sample: false
+ security_groups:
+ description: The UUIDs of any attached security groups.
+ returned: success
+ type: list
+ status:
+ description: The port status.
+ returned: success
+ type: string
+ sample: "ACTIVE"
+ tenant_id:
+ description: The UUID of the tenant who owns the network.
+ returned: success
+ type: string
+ sample: "51fce036d7984ba6af4f6c849f65ef00"
+'''
+
+
def main():
    """Module entry point: gather port facts into C(openstack_ports)."""
    argument_spec = openstack_full_argument_spec(
        port=dict(required=False),
        filters=dict(type='dict', required=False),
    )
    module = AnsibleModule(argument_spec, **openstack_module_kwargs())

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    # Pop module-only params; the rest configures the shade cloud connection.
    port = module.params.pop('port')
    filters = module.params.pop('filters')

    try:
        cloud = shade.openstack_cloud(**module.params)
        matched = cloud.search_ports(port, filters)
        module.exit_json(changed=False,
                         ansible_facts=dict(openstack_ports=matched))
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/openstack/os_project.py b/lib/ansible/modules/cloud/openstack/os_project.py
new file mode 100644
index 0000000000..22f5010755
--- /dev/null
+++ b/lib/ansible/modules/cloud/openstack/os_project.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# Copyright (c) 2015 IBM Corporation
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+from distutils.version import StrictVersion
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_project
+short_description: Manage OpenStack Projects
+extends_documentation_fragment: openstack
+version_added: "2.0"
+author: "Alberto Gireud (@agireud)"
+description:
+ - Manage OpenStack Projects. Projects can be created,
+ updated or deleted using this module. A project will be updated
+ if I(name) matches an existing project and I(state) is present.
+ The value for I(name) cannot be updated without deleting and
+ re-creating the project.
+options:
+ name:
+ description:
+ - Name for the project
+ required: true
+ description:
+ description:
+ - Description for the project
+ required: false
+ default: None
+ domain_id:
+ description:
+ - Domain id to create the project in if the cloud supports domains.
+ The domain_id parameter requires shade >= 1.8.0
+ required: false
+ default: None
+ aliases: ['domain']
+ enabled:
+ description:
+ - Is the project enabled
+ required: false
+ default: True
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Create a project
+- os_project:
+ cloud: mycloud
+ state: present
+ name: demoproject
+ description: demodescription
+ domain_id: demoid
+ enabled: True
+
+# Delete a project
+- os_project:
+ cloud: mycloud
+ state: absent
+ name: demoproject
+'''
+
+
+RETURN = '''
+project:
+ description: Dictionary describing the project.
+ returned: On success when I(state) is 'present'
+ type: dictionary
+ contains:
+ id:
+ description: Project ID
+ type: string
+ sample: "f59382db809c43139982ca4189404650"
+ name:
+ description: Project name
+ type: string
+ sample: "demoproject"
+ description:
+ description: Project description
+ type: string
+ sample: "demodescription"
+ enabled:
+ description: Boolean to indicate if project is enabled
+ type: bool
+ sample: True
+'''
+
+def _needs_update(module, project):
+ keys = ('description', 'enabled')
+ for key in keys:
+ if module.params[key] is not None and module.params[key] != project.get(key):
+ return True
+
+ return False
+
def _system_state_change(module, project):
    """Return True when the requested state would require a cloud change."""
    state = module.params['state']

    if state == 'present':
        # Missing project must be created; existing one may need an update.
        return project is None or _needs_update(module, project)

    # state == 'absent' (the only other allowed choice)
    return project is not None
+
def main():
    """Module entry point: create, update, or delete an OpenStack project."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        description=dict(required=False, default=None),
        domain_id=dict(required=False, default=None, aliases=['domain']),
        enabled=dict(default=True, type='bool'),
        state=dict(default='present', choices=['absent', 'present'])
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True,
        **module_kwargs
    )

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    name = module.params['name']
    description = module.params['description']
    domain = module.params.pop('domain_id')
    enabled = module.params['enabled']
    state = module.params['state']

    if domain and StrictVersion(shade.__version__) < StrictVersion('1.8.0'):
        module.fail_json(msg="The domain argument requires shade >=1.8.0")

    try:
        if domain:
            opcloud = shade.operator_cloud(**module.params)
            try:
                # We assume admin is passing domain id
                domain = opcloud.get_domain(domain)['id']
            # BUG FIX: bare "except:" also swallowed SystemExit and
            # KeyboardInterrupt; Exception keeps the fallback intact.
            except Exception:
                # If we fail, maybe admin is passing a domain name.
                # Note that domains have unique names, just like id.
                try:
                    domain = opcloud.search_domains(
                        filters={'name': domain})[0]['id']
                except Exception:
                    # Ok, let's hope the user is non-admin and passing a sane id
                    pass

        cloud = shade.openstack_cloud(**module.params)

        if domain:
            project = cloud.get_project(name, domain_id=domain)
        else:
            project = cloud.get_project(name)

        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, project))

        if state == 'present':
            if project is None:
                project = cloud.create_project(
                    name=name, description=description,
                    domain_id=domain,
                    enabled=enabled)
                changed = True
            else:
                if _needs_update(module, project):
                    project = cloud.update_project(
                        project['id'], description=description,
                        enabled=enabled)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed, project=project)

        elif state == 'absent':
            if project is None:
                changed = False
            else:
                cloud.delete_project(project['id'])
                changed = True
            module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        # BUG FIX: e.message does not exist on Python 3 exceptions and would
        # itself raise AttributeError inside this handler; str(e) is portable.
        module.fail_json(msg=str(e), extra_data=e.extra_data)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/openstack/os_project_facts.py b/lib/ansible/modules/cloud/openstack/os_project_facts.py
new file mode 100644
index 0000000000..856b6304ce
--- /dev/null
+++ b/lib/ansible/modules/cloud/openstack/os_project_facts.py
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_project_facts
+short_description: Retrieve facts about one or more OpenStack projects
+extends_documentation_fragment: openstack
+version_added: "2.1"
+author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
+description:
+ - Retrieve facts about a one or more OpenStack projects
+requirements:
+ - "python >= 2.6"
+ - "shade"
+options:
+ name:
+ description:
+ - Name or ID of the project
+ required: true
+ domain:
+ description:
+ - Name or ID of the domain containing the project if the cloud supports domains
+ required: false
+ default: None
+ filters:
+ description:
+ - A dictionary of meta data to use for further filtering. Elements of
+ this dictionary may be additional dictionaries.
+ required: false
+ default: None
+'''
+
+EXAMPLES = '''
+# Gather facts about previously created projects
+- os_project_facts:
+ cloud: awesomecloud
+- debug:
+ var: openstack_projects
+
+# Gather facts about a previously created project by name
+- os_project_facts:
+ cloud: awesomecloud
+ name: demoproject
+- debug:
+ var: openstack_projects
+
+# Gather facts about a previously created project in a specific domain
+- os_project_facts:
+ cloud: awesomecloud
+ name: demoproject
+ domain: admindomain
+- debug:
+ var: openstack_projects
+
+# Gather facts about a previously created project in a specific domain
+# with filter
+- os_project_facts:
+ cloud: awesomecloud
+ name: demoproject
+ domain: admindomain
+ filters:
+ enabled: False
+- debug:
+ var: openstack_projects
+'''
+
+
+RETURN = '''
+openstack_projects:
+ description: has all the OpenStack facts about projects
+ returned: always, but can be null
+ type: complex
+ contains:
+ id:
+ description: Unique UUID.
+ returned: success
+ type: string
+ name:
+ description: Name given to the project.
+ returned: success
+ type: string
+ description:
+ description: Description of the project
+ returned: success
+ type: string
+ enabled:
+ description: Flag to indicate if the project is enabled
+ returned: success
+ type: bool
+ domain_id:
+ description: Domain ID containing the project (keystone v3 clouds only)
+ returned: success
+ type: bool
+'''
+
def main():
    """Module entry point: gather project facts, optionally domain-scoped."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=False, default=None),
        domain=dict(required=False, default=None),
        filters=dict(required=False, type='dict', default=None),
    )

    module = AnsibleModule(argument_spec)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    try:
        name = module.params['name']
        domain = module.params['domain']
        filters = module.params['filters']

        opcloud = shade.operator_cloud(**module.params)

        if domain:
            try:
                # We assume admin is passing domain id
                domain = opcloud.get_domain(domain)['id']
            # BUG FIX: bare "except:" also swallowed SystemExit and
            # KeyboardInterrupt; Exception keeps the fallback intact.
            except Exception:
                # If we fail, maybe admin is passing a domain name.
                # Note that domains have unique names, just like id.
                dom = opcloud.search_domains(filters={'name': domain})
                if dom:
                    domain = dom[0]['id']
                else:
                    module.fail_json(msg='Domain name or ID does not exist')

            if not filters:
                filters = {}

            # Narrow the search to the resolved domain.
            filters['domain_id'] = domain

        projects = opcloud.search_projects(name, filters)
        module.exit_json(changed=False, ansible_facts=dict(
            openstack_projects=projects))

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/openstack/os_recordset.py b/lib/ansible/modules/cloud/openstack/os_recordset.py
new file mode 100644
index 0000000000..62fa856410
--- /dev/null
+++ b/lib/ansible/modules/cloud/openstack/os_recordset.py
@@ -0,0 +1,246 @@
+#!/usr/bin/python
+# Copyright (c) 2016 Hewlett-Packard Enterprise
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+from distutils.version import StrictVersion
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_recordset
+short_description: Manage OpenStack DNS recordsets
+extends_documentation_fragment: openstack
+version_added: "2.2"
+author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
+description:
+ - Manage OpenStack DNS recordsets. Recordsets can be created, deleted or
+ updated. Only the I(records), I(description), and I(ttl) values
+ can be updated.
+options:
+ zone:
+ description:
+ - Zone managing the recordset
+ required: true
+ name:
+ description:
+ - Name of the recordset
+ required: true
+ recordset_type:
+ description:
+ - Recordset type
+ required: true
+ records:
+ description:
+ - List of recordset definitions
+ required: true
+ description:
+ description:
+ - Description of the recordset
+ required: false
+ default: None
+ ttl:
+ description:
+ - TTL (Time To Live) value in seconds
+ required: false
+ default: None
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Create a recordset named "www.example.net."
+- os_recordset:
+ cloud: mycloud
+ state: present
+ zone: example.net.
+ name: www
+ recordset_type: primary
+ records: ['10.1.1.1']
+ description: test recordset
+ ttl: 3600
+
+# Update the TTL on existing "www.example.net." recordset
+- os_recordset:
+ cloud: mycloud
+ state: present
+ zone: example.net.
+ name: www
+ ttl: 7200
+
+# Delete recordset named "www.example.net."
+- os_recordset:
+ cloud: mycloud
+ state: absent
+ zone: example.net.
+ name: www
+'''
+
+RETURN = '''
+recordset:
+ description: Dictionary describing the recordset.
+ returned: On success when I(state) is 'present'.
+ type: dictionary
+ contains:
+ id:
+ description: Unique recordset ID
+ type: string
+ sample: "c1c530a3-3619-46f3-b0f6-236927b2618c"
+ name:
+ description: Recordset name
+ type: string
+ sample: "www.example.net."
+ zone_id:
+ description: Zone id
+ type: string
+ sample: 9508e177-41d8-434e-962c-6fe6ca880af7
+ type:
+ description: Recordset type
+ type: string
+ sample: "A"
+ description:
+ description: Recordset description
+ type: string
+ sample: "Test description"
+ ttl:
+ description: Zone TTL value
+ type: int
+ sample: 3600
+ records:
+ description: Recordset records
+ type: list
+ sample: ['10.0.0.1']
+'''
+
+
+def _system_state_change(state, records, description, ttl, zone, recordset):
+ if state == 'present':
+ if recordset is None:
+ return True
+ if records is not None and recordset.records != records:
+ return True
+ if description is not None and recordset.description != description:
+ return True
+ if ttl is not None and recordset.ttl != ttl:
+ return True
+ if state == 'absent' and recordset:
+ return True
+ return False
+
def main():
    """Module entry point: create, update, or delete a DNS recordset."""
    argument_spec = openstack_full_argument_spec(
        zone=dict(required=True),
        name=dict(required=True),
        recordset_type=dict(required=False),
        records=dict(required=False, type='list'),
        description=dict(required=False, default=None),
        ttl=dict(required=False, default=None, type='int'),
        state=dict(default='present', choices=['absent', 'present']),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           required_if=[
                               ('state', 'present',
                                ['recordset_type', 'records'])],
                           supports_check_mode=True,
                           **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    if StrictVersion(shade.__version__) <= StrictVersion('1.8.0'):
        module.fail_json(msg="To utilize this module, the installed version of "
                             "the shade library MUST be >1.8.0")

    zone = module.params.get('zone')
    name = module.params.get('name')
    state = module.params.get('state')

    try:
        cloud = shade.openstack_cloud(**module.params)
        # Recordset names are fully qualified inside their zone.
        recordset = cloud.get_recordset(zone, name + '.' + zone)

        if state == 'present':
            recordset_type = module.params.get('recordset_type')
            records = module.params.get('records')
            description = module.params.get('description')
            ttl = module.params.get('ttl')

            if module.check_mode:
                module.exit_json(changed=_system_state_change(state,
                                                              records, description,
                                                              ttl, zone,
                                                              recordset))

            if recordset is None:
                recordset = cloud.create_recordset(
                    zone=zone, name=name, recordset_type=recordset_type,
                    records=records, description=description, ttl=ttl)
                changed = True
            else:
                if records is None:
                    records = []

                changed = _system_state_change(state, records,
                                               description, ttl,
                                               zone, recordset)
                if changed:
                    # BUG FIX: the update result was previously assigned to
                    # ``zone``, clobbering the zone name and returning the
                    # stale pre-update recordset to the caller.
                    recordset = cloud.update_recordset(
                        zone, name + '.' + zone,
                        records=records,
                        description=description,
                        ttl=ttl)
            module.exit_json(changed=changed, recordset=recordset)

        elif state == 'absent':
            if module.check_mode:
                module.exit_json(changed=_system_state_change(state,
                                                              None, None,
                                                              None,
                                                              None, recordset))

            if recordset is None:
                changed = False
            else:
                cloud.delete_recordset(zone, name + '.' + zone)
                changed = True
            module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/openstack/os_server_group.py b/lib/ansible/modules/cloud/openstack/os_server_group.py
new file mode 100644
index 0000000000..0103fef867
--- /dev/null
+++ b/lib/ansible/modules/cloud/openstack/os_server_group.py
@@ -0,0 +1,186 @@
+#!/usr/bin/python
+
+# Copyright (c) 2016 Catalyst IT Limited
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_server_group
+short_description: Manage OpenStack server groups
+extends_documentation_fragment: openstack
+version_added: "2.2"
+author: "Lingxian Kong (@kong)"
+description:
+ - Add or remove server groups from OpenStack.
+options:
+ state:
+ description:
+ - Indicate desired state of the resource. When I(state) is 'present',
+ then I(policies) is required.
+ choices: ['present', 'absent']
+ required: false
+ default: present
+ name:
+ description:
+ - Server group name.
+ required: true
+ policies:
+ description:
+ - A list of one or more policy names to associate with the server
+ group. The list must contain at least one policy name. The current
+ valid policy names are anti-affinity, affinity, soft-anti-affinity
+ and soft-affinity.
+ required: false
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Create a server group with 'affinity' policy.
+- os_server_group:
+ state: present
+ auth:
+ auth_url: https://api.cloud.catalyst.net.nz:5000/v2.0
+ username: admin
+ password: admin
+ project_name: admin
+ name: my_server_group
+ policies:
+ - affinity
+
+# Delete 'my_server_group' server group.
+- os_server_group:
+ state: absent
+ auth:
+ auth_url: https://api.cloud.catalyst.net.nz:5000/v2.0
+ username: admin
+ password: admin
+ project_name: admin
+ name: my_server_group
+'''
+
+RETURN = '''
+id:
+ description: Unique UUID.
+ returned: success
+ type: string
+name:
+ description: The name of the server group.
+ returned: success
+ type: string
+policies:
+ description: A list of one or more policy names of the server group.
+ returned: success
+ type: list of strings
+members:
+ description: A list of members in the server group.
+ returned: success
+ type: list of strings
+metadata:
+ description: Metadata key and value pairs.
+ returned: success
+ type: dict
+project_id:
+ description: The project ID who owns the server group.
+ returned: success
+ type: string
+user_id:
+ description: The user ID who owns the server group.
+ returned: success
+ type: string
+'''
+
+
+def _system_state_change(state, server_group):
+ if state == 'present' and not server_group:
+ return True
+ if state == 'absent' and server_group:
+ return True
+
+ return False
+
+
def main():
    """Module entry point: ensure a Nova server group is present or absent."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        policies=dict(required=False, type='list'),
        state=dict(default='present', choices=['absent', 'present']),
    )
    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True,
        **openstack_module_kwargs()
    )

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    name = module.params['name']
    policies = module.params['policies']
    state = module.params['state']

    try:
        cloud = shade.openstack_cloud(**module.params)
        group = cloud.get_server_group(name)

        if module.check_mode:
            module.exit_json(
                changed=_system_state_change(state, group)
            )

        changed = False
        if state == 'present':
            if not group:
                # Policies are only mandatory when the group must be created.
                if not policies:
                    module.fail_json(
                        msg="Parameter 'policies' is required in Server Group "
                            "Create"
                    )
                group = cloud.create_server_group(name, policies)
                changed = True

            module.exit_json(
                changed=changed,
                id=group['id'],
                server_group=group
            )
        if state == 'absent':
            if group:
                cloud.delete_server_group(group['id'])
                changed = True
            module.exit_json(changed=changed)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e), extra_data=e.extra_data)
+
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/openstack/os_stack.py b/lib/ansible/modules/cloud/openstack/os_stack.py
new file mode 100644
index 0000000000..fc42b62112
--- /dev/null
+++ b/lib/ansible/modules/cloud/openstack/os_stack.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Mathieu Bultel <mbultel@redhat.com>
+# (c) 2016, Steve Baker <sbaker@redhat.com>
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+from time import sleep
+from distutils.version import StrictVersion
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_stack
+short_description: Add/Remove Heat Stack
+extends_documentation_fragment: openstack
+version_added: "2.2"
+author: "Mathieu Bultel (matbu), Steve Baker (steveb)"
+description:
+ - Add or Remove a Stack to an OpenStack Heat
+options:
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ required: false
+ default: present
+ name:
+ description:
+ - Name of the stack that should be created, name could be char and digit, no space
+ required: true
+ template:
+ description:
+ - Path of the template file to use for the stack creation
+ required: false
+ default: None
+ environment:
+ description:
+ - List of environment files that should be used for the stack creation
+ required: false
+ default: None
+ parameters:
+ description:
+ - Dictionary of parameters for the stack creation
+ required: false
+ default: None
+ rollback:
+ description:
+ - Rollback stack creation
+ required: false
+ default: false
+ timeout:
+ description:
+ - Maximum number of seconds to wait for the stack creation
+ required: false
+ default: 3600
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+EXAMPLES = '''
+---
+- name: create stack
+ ignore_errors: True
+ register: stack_create
+ os_stack:
+ name: "{{ stack_name }}"
+ state: present
+ template: "/path/to/my_stack.yaml"
+ environment:
+ - /path/to/resource-registry.yaml
+ - /path/to/environment.yaml
+ parameters:
+ bmc_flavor: m1.medium
+ bmc_image: CentOS
+ key_name: default
+      private_net: "{{ private_net_param }}"
+ node_count: 2
+ name: undercloud
+ image: CentOS
+ my_flavor: m1.large
+      external_net: "{{ external_net_param }}"
+'''
+
+RETURN = '''
+id:
+ description: Stack ID.
+ type: string
+ sample: "97a3f543-8136-4570-920e-fd7605c989d6"
+
+stack:
+ action:
+ description: Action, could be Create or Update.
+ type: string
+ sample: "CREATE"
+ creation_time:
+ description: Time when the action has been made.
+ type: string
+ sample: "2016-07-05T17:38:12Z"
+ description:
+ description: Description of the Stack provided in the heat template.
+ type: string
+ sample: "HOT template to create a new instance and networks"
+ id:
+ description: Stack ID.
+ type: string
+ sample: "97a3f543-8136-4570-920e-fd7605c989d6"
+ name:
+ description: Name of the Stack
+ type: string
+ sample: "test-stack"
+ identifier:
+ description: Identifier of the current Stack action.
+ type: string
+ sample: "test-stack/97a3f543-8136-4570-920e-fd7605c989d6"
+ links:
+ description: Links to the current Stack.
+ type: list of dict
+        sample: "[{'href': 'http://foo:8004/v1/7f6a/stacks/test-stack/97a3f543-8136-4570-920e-fd7605c989d6'}]"
+ outputs:
+ description: Output returned by the Stack.
+ type: list of dict
+ sample: "{'description': 'IP address of server1 in private network',
+ 'output_key': 'server1_private_ip',
+ 'output_value': '10.1.10.103'}"
+ parameters:
+ description: Parameters of the current Stack
+ type: dict
+ sample: "{'OS::project_id': '7f6a3a3e01164a4eb4eecb2ab7742101',
+ 'OS::stack_id': '97a3f543-8136-4570-920e-fd7605c989d6',
+ 'OS::stack_name': 'test-stack',
+ 'stack_status': 'CREATE_COMPLETE',
+ 'stack_status_reason': 'Stack CREATE completed successfully',
+ 'status': 'COMPLETE',
+ 'template_description': 'HOT template to create a new instance and networks',
+ 'timeout_mins': 60,
+ 'updated_time': null}"
+'''
+
+def _create_stack(module, stack, cloud):
+ try:
+ stack = cloud.create_stack(module.params['name'],
+ template_file=module.params['template'],
+ environment_files=module.params['environment'],
+ timeout=module.params['timeout'],
+ wait=True,
+ rollback=module.params['rollback'],
+ **module.params['parameters'])
+
+ stack = cloud.get_stack(stack.id, None)
+ if stack.stack_status == 'CREATE_COMPLETE':
+ return stack
+ else:
+ return False
+ module.fail_json(msg = "Failure in creating stack: ".format(stack))
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+def _update_stack(module, stack, cloud):
+ try:
+ stack = cloud.update_stack(
+ module.params['name'],
+ template_file=module.params['template'],
+ environment_files=module.params['environment'],
+ timeout=module.params['timeout'],
+ rollback=module.params['rollback'],
+ wait=module.params['wait'],
+ **module.params['parameters'])
+
+ if stack['stack_status'] == 'UPDATE_COMPLETE':
+ return stack
+ else:
+ module.fail_json(msg = "Failure in updating stack: %s" %
+ stack['stack_status_reason'])
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=str(e))
+
+def _system_state_change(module, stack, cloud):
+ state = module.params['state']
+ if state == 'present':
+ if not stack:
+ return True
+ if state == 'absent' and stack:
+ return True
+ return False
+
def main():
    """Create, update or delete an OpenStack Heat stack."""

    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        template=dict(default=None),
        environment=dict(default=None, type='list'),
        parameters=dict(default={}, type='dict'),
        rollback=dict(default=False, type='bool'),
        timeout=dict(default=3600, type='int'),
        state=dict(default='present', choices=['absent', 'present']),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    # stack API introduced in 1.8.0
    if not HAS_SHADE or (StrictVersion(shade.__version__) < StrictVersion('1.8.0')):
        module.fail_json(msg='shade 1.8.0 or higher is required for this module')

    state = module.params['state']
    name = module.params['name']
    # Check for required parameters when state == 'present'
    if state == 'present':
        for p in ['template']:
            if not module.params[p]:
                module.fail_json(msg='%s required with present state' % p)

    try:
        cloud = shade.openstack_cloud(**module.params)
        stack = cloud.get_stack(name)

        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, stack,
                                                          cloud))

        if state == 'present':
            # Create when the stack does not exist, otherwise update it.
            if not stack:
                stack = _create_stack(module, stack, cloud)
            else:
                stack = _update_stack(module, stack, cloud)
            # NOTE(review): 'changed' is reported True even when the update
            # turns out to be a no-op — confirm whether this is intended.
            changed = True
            module.exit_json(changed=changed,
                             stack=stack,
                             id=stack.id)
        elif state == 'absent':
            if not stack:
                changed = False
            else:
                changed = True
                if not cloud.delete_stack(name, wait=module.params['wait']):
                    module.fail_json(msg='delete stack failed for stack: %s' % name)
            module.exit_json(changed=changed)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/openstack/os_user_facts.py b/lib/ansible/modules/cloud/openstack/os_user_facts.py
new file mode 100644
index 0000000000..52af5b8e62
--- /dev/null
+++ b/lib/ansible/modules/cloud/openstack/os_user_facts.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_user_facts
+short_description: Retrieve facts about one or more OpenStack users
+extends_documentation_fragment: openstack
+version_added: "2.1"
+author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
+description:
+    - Retrieve facts about one or more OpenStack users
+requirements:
+ - "python >= 2.6"
+ - "shade"
+options:
+ name:
+ description:
+ - Name or ID of the user
+ required: true
+ domain:
+ description:
+ - Name or ID of the domain containing the user if the cloud supports domains
+ required: false
+ default: None
+ filters:
+ description:
+ - A dictionary of meta data to use for further filtering. Elements of
+ this dictionary may be additional dictionaries.
+ required: false
+ default: None
+'''
+
+EXAMPLES = '''
+# Gather facts about previously created users
+- os_user_facts:
+ cloud: awesomecloud
+- debug:
+ var: openstack_users
+
+# Gather facts about a previously created user by name
+- os_user_facts:
+ cloud: awesomecloud
+ name: demouser
+- debug:
+ var: openstack_users
+
+# Gather facts about a previously created user in a specific domain
+- os_user_facts:
+ cloud: awesomecloud
+ name: demouser
+ domain: admindomain
+- debug:
+ var: openstack_users
+
+# Gather facts about a previously created user in a specific domain
+# with filter
+- os_user_facts:
+ cloud: awesomecloud
+ name: demouser
+ domain: admindomain
+ filters:
+ enabled: False
+- debug:
+ var: openstack_users
+'''
+
+
+RETURN = '''
+openstack_users:
+ description: has all the OpenStack facts about users
+ returned: always, but can be null
+ type: complex
+ contains:
+ id:
+ description: Unique UUID.
+ returned: success
+ type: string
+ name:
+ description: Name given to the user.
+ returned: success
+ type: string
+ enabled:
+ description: Flag to indicate if the user is enabled
+ returned: success
+ type: bool
+ domain_id:
+ description: Domain ID containing the user
+ returned: success
+ type: string
+ default_project_id:
+ description: Default project ID of the user
+ returned: success
+ type: string
+ email:
+ description: Email of the user
+ returned: success
+ type: string
+ username:
+ description: Username of the user
+ returned: success
+ type: string
+'''
+
def main():
    """Gather facts about OpenStack users, optionally filtered by name,
    domain and arbitrary metadata."""

    argument_spec = openstack_full_argument_spec(
        name=dict(required=False, default=None),
        domain=dict(required=False, default=None),
        filters=dict(required=False, type='dict', default=None),
    )

    module = AnsibleModule(argument_spec)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    try:
        name = module.params['name']
        domain = module.params['domain']
        filters = module.params['filters']

        opcloud = shade.operator_cloud(**module.params)

        if domain:
            try:
                # We assume admin is passing domain id
                dom = opcloud.get_domain(domain)['id']
                domain = dom
            except Exception:
                # BUGFIX: was a bare 'except:', which also swallowed
                # SystemExit/KeyboardInterrupt.
                # If we fail, maybe admin is passing a domain name.
                # Note that domains have unique names, just like id.
                dom = opcloud.search_domains(filters={'name': domain})
                if dom:
                    domain = dom[0]['id']
                else:
                    module.fail_json(msg='Domain name or ID does not exist')

        if not filters:
            filters = {}

        # BUGFIX: only constrain by domain when one was actually given;
        # the original unconditionally set filters['domain_id'], injecting
        # 'domain_id': None when no domain was requested.
        if domain:
            filters['domain_id'] = domain

        users = opcloud.search_users(name, filters)
        module.exit_json(changed=False, ansible_facts=dict(
            openstack_users=users))

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/openstack/os_user_role.py b/lib/ansible/modules/cloud/openstack/os_user_role.py
new file mode 100644
index 0000000000..41b0b73e07
--- /dev/null
+++ b/lib/ansible/modules/cloud/openstack/os_user_role.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# Copyright (c) 2016 IBM
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+from distutils.version import StrictVersion
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_user_role
+short_description: Associate OpenStack Identity users and roles
+extends_documentation_fragment: openstack
+author: "Monty Taylor (@emonty), David Shrewsbury (@Shrews)"
+version_added: "2.1"
+description:
+ - Grant and revoke roles in either project or domain context for
+ OpenStack Identity Users.
+options:
+ role:
+ description:
+ - Name or ID for the role.
+ required: true
+ user:
+ description:
+ - Name or ID for the user. If I(user) is not specified, then
+ I(group) is required. Both may not be specified.
+ required: false
+ default: null
+ group:
+ description:
+ - Name or ID for the group. Valid only with keystone version 3.
+ If I(group) is not specified, then I(user) is required. Both
+ may not be specified.
+ required: false
+ default: null
+ project:
+ description:
+      - Name or ID of the project to scope the role association to.
+ If you are using keystone version 2, then this value is required.
+ required: false
+ default: null
+ domain:
+ description:
+ - ID of the domain to scope the role association to. Valid only with
+ keystone version 3, and required if I(project) is not specified.
+ required: false
+ default: null
+ state:
+ description:
+ - Should the roles be present or absent on the user.
+ choices: [present, absent]
+ default: present
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Grant an admin role on the user admin in the project project1
+- os_user_role:
+ cloud: mycloud
+ user: admin
+ role: admin
+ project: project1
+
+# Revoke the admin role from the user barney in the newyork domain
+- os_user_role:
+ cloud: mycloud
+ state: absent
+ user: barney
+ role: admin
+ domain: newyork
+'''
+
+RETURN = '''
+#
+'''
+
+def _system_state_change(state, assignment):
+ if state == 'present' and not assignment:
+ return True
+ elif state == 'absent' and assignment:
+ return True
+ return False
+
+
+def _build_kwargs(user, group, project, domain):
+ kwargs = {}
+ if user:
+ kwargs['user'] = user
+ if group:
+ kwargs['group'] = group
+ if project:
+ kwargs['project'] = project
+ if domain:
+ kwargs['domain'] = domain
+ return kwargs
+
+
def main():
    """Grant or revoke an OpenStack Identity role for a user or group."""
    argument_spec = openstack_full_argument_spec(
        role=dict(required=True),
        user=dict(required=False),
        group=dict(required=False),
        project=dict(required=False),
        domain=dict(required=False),
        state=dict(default='present', choices=['absent', 'present']),
    )

    # Exactly one of user/group must be supplied (enforced by Ansible).
    module_kwargs = openstack_module_kwargs(
        required_one_of=[
            ['user', 'group']
        ])
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    # role grant/revoke API introduced in 1.5.0
    if not HAS_SHADE or (StrictVersion(shade.__version__) < StrictVersion('1.5.0')):
        module.fail_json(msg='shade 1.5.0 or higher is required for this module')

    # pop() removes module-only keys so the remaining params can be fed
    # straight to shade as cloud-connection arguments.
    role = module.params.pop('role')
    user = module.params.pop('user')
    group = module.params.pop('group')
    project = module.params.pop('project')
    domain = module.params.pop('domain')
    state = module.params.pop('state')

    try:
        cloud = shade.operator_cloud(**module.params)

        filters = {}

        # Resolve each name/ID into a concrete ID and build the filter
        # used to look up the existing assignment.
        r = cloud.get_role(role)
        if r is None:
            module.fail_json(msg="Role %s is not valid" % role)
        filters['role'] = r['id']

        if user:
            u = cloud.get_user(user)
            if u is None:
                module.fail_json(msg="User %s is not valid" % user)
            filters['user'] = u['id']
        if group:
            g = cloud.get_group(group)
            if g is None:
                module.fail_json(msg="Group %s is not valid" % group)
            filters['group'] = g['id']
        if domain:
            d = cloud.get_domain(domain)
            if d is None:
                module.fail_json(msg="Domain %s is not valid" % domain)
            filters['domain'] = d['id']
        if project:
            # Scope the project lookup to the domain when one was given.
            if domain:
                p = cloud.get_project(project, domain_id=filters['domain'])
            else:
                p = cloud.get_project(project)

            if p is None:
                module.fail_json(msg="Project %s is not valid" % project)
            filters['project'] = p['id']

        assignment = cloud.list_role_assignments(filters=filters)

        if module.check_mode:
            module.exit_json(changed=_system_state_change(state, assignment))

        changed = False

        if state == 'present':
            if not assignment:
                kwargs = _build_kwargs(user, group, project, domain)
                cloud.grant_role(role, **kwargs)
                changed = True

        elif state == 'absent':
            if assignment:
                kwargs = _build_kwargs(user, group, project, domain)
                cloud.revoke_role(role, **kwargs)
                changed = True

        module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/openstack/os_zone.py b/lib/ansible/modules/cloud/openstack/os_zone.py
new file mode 100644
index 0000000000..a733d80ab2
--- /dev/null
+++ b/lib/ansible/modules/cloud/openstack/os_zone.py
@@ -0,0 +1,241 @@
+#!/usr/bin/python
+# Copyright (c) 2016 Hewlett-Packard Enterprise
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+from distutils.version import StrictVersion
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: os_zone
+short_description: Manage OpenStack DNS zones
+extends_documentation_fragment: openstack
+version_added: "2.2"
+author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
+description:
+ - Manage OpenStack DNS zones. Zones can be created, deleted or
+ updated. Only the I(email), I(description), I(ttl) and I(masters) values
+ can be updated.
+options:
+ name:
+ description:
+ - Zone name
+ required: true
+ zone_type:
+ description:
+ - Zone type
+ choices: [primary, secondary]
+ default: None
+ email:
+ description:
+ - Email of the zone owner (only applies if zone_type is primary)
+ required: false
+ description:
+ description:
+ - Zone description
+ required: false
+ default: None
+ ttl:
+ description:
+ - TTL (Time To Live) value in seconds
+ required: false
+ default: None
+ masters:
+ description:
+ - Master nameservers (only applies if zone_type is secondary)
+ required: false
+ default: None
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+requirements:
+ - "python >= 2.6"
+ - "shade"
+'''
+
+EXAMPLES = '''
+# Create a zone named "example.net"
+- os_zone:
+ cloud: mycloud
+ state: present
+ name: example.net.
+ zone_type: primary
+ email: test@example.net
+ description: Test zone
+ ttl: 3600
+
+# Update the TTL on existing "example.net." zone
+- os_zone:
+ cloud: mycloud
+ state: present
+ name: example.net.
+ ttl: 7200
+
+# Delete zone named "example.net."
+- os_zone:
+ cloud: mycloud
+ state: absent
+ name: example.net.
+'''
+
+RETURN = '''
+zone:
+ description: Dictionary describing the zone.
+ returned: On success when I(state) is 'present'.
+ type: dictionary
+ contains:
+ id:
+ description: Unique zone ID
+ type: string
+ sample: "c1c530a3-3619-46f3-b0f6-236927b2618c"
+ name:
+ description: Zone name
+ type: string
+ sample: "example.net."
+ type:
+ description: Zone type
+ type: string
+ sample: "PRIMARY"
+ email:
+ description: Zone owner email
+ type: string
+ sample: "test@example.net"
+ description:
+ description: Zone description
+ type: string
+ sample: "Test description"
+ ttl:
+ description: Zone TTL value
+ type: int
+ sample: 3600
+ masters:
+ description: Zone master nameservers
+ type: list
+ sample: []
+'''
+
+
+def _system_state_change(state, email, description, ttl, masters, zone):
+ if state == 'present':
+ if not zone:
+ return True
+ if email is not None and zone.email != email:
+ return True
+ if description is not None and zone.description != description:
+ return True
+ if ttl is not None and zone.ttl != ttl:
+ return True
+ if masters is not None and zone.masters != masters:
+ return True
+ if state == 'absent' and zone:
+ return True
+ return False
+
def main():
    """Create, update or delete an OpenStack Designate DNS zone."""
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        # BUGFIX: the spec key is 'choices', not 'choice'; the typo meant
        # Ansible never validated the value.
        zone_type=dict(required=False, choices=['primary', 'secondary']),
        email=dict(required=False, default=None),
        description=dict(required=False, default=None),
        ttl=dict(required=False, default=None, type='int'),
        masters=dict(required=False, default=None, type='list'),
        state=dict(default='present', choices=['absent', 'present']),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    if StrictVersion(shade.__version__) < StrictVersion('1.8.0'):
        # BUGFIX: added the trailing space — the two literals used to
        # concatenate into "version ofthe shade".
        module.fail_json(msg="To utilize this module, the installed version of "
                             "the shade library MUST be >=1.8.0")

    name = module.params.get('name')
    state = module.params.get('state')

    try:
        cloud = shade.openstack_cloud(**module.params)
        zone = cloud.get_zone(name)

        if state == 'present':
            zone_type = module.params.get('zone_type')
            email = module.params.get('email')
            description = module.params.get('description')
            ttl = module.params.get('ttl')
            masters = module.params.get('masters')

            if module.check_mode:
                module.exit_json(changed=_system_state_change(state, email,
                                                              description, ttl,
                                                              masters, zone))

            if zone is None:
                zone = cloud.create_zone(
                    name=name, zone_type=zone_type, email=email,
                    description=description, ttl=ttl, masters=masters)
                changed = True
            else:
                if masters is None:
                    masters = []

                # Only call the API when a watched attribute differs.
                pre_update_zone = zone
                changed = _system_state_change(state, email,
                                               description, ttl,
                                               masters, pre_update_zone)
                if changed:
                    zone = cloud.update_zone(
                        name, email=email,
                        description=description,
                        ttl=ttl, masters=masters)
            module.exit_json(changed=changed, zone=zone)

        elif state == 'absent':
            if module.check_mode:
                module.exit_json(changed=_system_state_change(state, None,
                                                              None, None,
                                                              None, zone))

            if zone is None:
                changed = False
            else:
                cloud.delete_zone(name)
                changed = True
            module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/ovh/__init__.py b/lib/ansible/modules/cloud/ovh/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovh/__init__.py
diff --git a/lib/ansible/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py b/lib/ansible/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py
new file mode 100644
index 0000000000..3499e73a92
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovh/ovh_ip_loadbalancing_backend.py
@@ -0,0 +1,316 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovh_ip_loadbalancing_backend
+short_description: Manage OVH IP LoadBalancing backends
+description:
+ - Manage OVH (French European hosting provider) LoadBalancing IP backends
+version_added: "2.2"
+author: Pascal HERAUD @pascalheraud
+notes:
+ - Uses the python OVH Api U(https://github.com/ovh/python-ovh).
+      You have to create an application (a key and secret) with a consumer
+ key as described into U(https://eu.api.ovh.com/g934.first_step_with_api)
+requirements:
+ - ovh > 0.3.5
+options:
+ name:
+ required: true
+ description:
+ - Name of the LoadBalancing internal name (ip-X.X.X.X)
+ backend:
+ required: true
+ description:
+ - The IP address of the backend to update / modify / delete
+ state:
+ required: false
+ default: present
+ choices: ['present', 'absent']
+ description:
+            - Determines whether the backend is to be created/modified
+ or deleted
+ probe:
+ required: false
+ default: none
+ choices: ['none', 'http', 'icmp' , 'oco']
+ description:
+ - Determines the type of probe to use for this backend
+ weight:
+ required: false
+ default: 8
+ description:
+ - Determines the weight for this backend
+ endpoint:
+ required: true
+ description:
+ - The endpoint to use ( for instance ovh-eu)
+ application_key:
+ required: true
+ description:
+ - The applicationKey to use
+ application_secret:
+ required: true
+ description:
+ - The application secret to use
+ consumer_key:
+ required: true
+ description:
+ - The consumer key to use
+ timeout:
+ required: false
+ type: "int"
+ default: 120
+ description:
+ - The timeout in seconds used to wait for a task to be
+ completed. Default is 120 seconds.
+
+'''
+
+EXAMPLES = '''
+# Adds or modify the backend '212.1.1.1' to a
+# loadbalancing 'ip-1.1.1.1'
+- ovh_ip_loadbalancing:
+ name: ip-1.1.1.1
+ backend: 212.1.1.1
+ state: present
+ probe: none
+ weight: 8
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+
+# Removes a backend '212.1.1.1' from a loadbalancing 'ip-1.1.1.1'
+- ovh_ip_loadbalancing:
+ name: ip-1.1.1.1
+ backend: 212.1.1.1
+ state: absent
+ endpoint: ovh-eu
+ application_key: yourkey
+ application_secret: yoursecret
+ consumer_key: yourconsumerkey
+'''
+
+RETURN = '''
+'''
+
+import time
+try:
+ import ovh
+ import ovh.exceptions
+ from ovh.exceptions import APIError
+ HAS_OVH = True
+except ImportError:
+ HAS_OVH = False
+
def getOvhClient(ansibleModule):
    """Build an OVH API client from the module's credential parameters."""
    params = ansibleModule.params
    return ovh.Client(
        endpoint=params.get('endpoint'),
        application_key=params.get('application_key'),
        application_secret=params.get('application_secret'),
        consumer_key=params.get('consumer_key'),
    )
+
+
def waitForNoTask(client, name, timeout):
    """Poll until the load balancing has no pending task.

    Returns True once the task list is empty, or False when more than
    `timeout` seconds elapse first.
    """
    remaining = timeout
    while client.get('/ip/loadBalancing/{0}/task'.format(name)):
        time.sleep(1)  # poll once per second
        remaining -= 1
        if remaining < 0:
            return False
    return True
+
+
def main():
    """Ensure an OVH IP load balancing backend is present or absent and,
    when present, that its weight and probe match the requested values."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            backend=dict(required=True),
            weight=dict(default=8, type='int'),
            probe=dict(default='none',
                       choices=['none', 'http', 'icmp', 'oco']),
            state=dict(default='present', choices=['present', 'absent']),
            endpoint=dict(required=True),
            application_key=dict(required=True, no_log=True),
            application_secret=dict(required=True, no_log=True),
            consumer_key=dict(required=True, no_log=True),
            timeout=dict(default=120, type='int')
        )
    )

    if not HAS_OVH:
        # BUGFIX: added the missing space (used to render "moduleis").
        module.fail_json(msg='ovh-api python module '
                             'is required to run this module ')

    # Get parameters
    name = module.params.get('name')
    state = module.params.get('state')
    backend = module.params.get('backend')
    # BUGFIX: int() instead of long() — 'long' does not exist on Python 3
    # and the argument spec already coerces the value to an integer.
    weight = int(module.params.get('weight'))
    probe = module.params.get('probe')
    timeout = module.params.get('timeout')

    # Connect to OVH API
    client = getOvhClient(module)

    # Check that the load balancing exists
    try:
        loadBalancings = client.get('/ip/loadBalancing')
    except APIError as apiError:
        module.fail_json(
            msg='Unable to call OVH api for getting the list of loadBalancing, '
                'check application key, secret, consumerkey and parameters. '
                'Error returned by OVH api was : {0}'.format(apiError))

    if name not in loadBalancings:
        module.fail_json(msg='IP LoadBalancing {0} does not exist'.format(name))

    # Check that no task is pending before going on
    try:
        if not waitForNoTask(client, name, timeout):
            module.fail_json(
                msg='Timeout of {0} seconds while waiting for no pending '
                    'tasks before executing the module '.format(timeout))
    except APIError as apiError:
        module.fail_json(
            msg='Unable to call OVH api for getting the list of pending tasks '
                'of the loadBalancing, check application key, secret, consumerkey '
                'and parameters. Error returned by OVH api was : {0}'
            .format(apiError))

    try:
        backends = client.get('/ip/loadBalancing/{0}/backend'.format(name))
    except APIError as apiError:
        module.fail_json(
            msg='Unable to call OVH api for getting the list of backends '
                'of the loadBalancing, check application key, secret, consumerkey '
                'and parameters. Error returned by OVH api was : {0}'
            .format(apiError))

    backendExists = backend in backends
    moduleChanged = False
    if state == "absent":
        if backendExists:
            # Remove backend
            try:
                client.delete(
                    '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
                if not waitForNoTask(client, name, timeout):
                    module.fail_json(
                        msg='Timeout of {0} seconds while waiting for completion '
                            'of removing backend task'.format(timeout))
            except APIError as apiError:
                module.fail_json(
                    msg='Unable to call OVH api for deleting the backend, '
                        'check application key, secret, consumerkey and '
                        'parameters. Error returned by OVH api was : {0}'
                    .format(apiError))
            moduleChanged = True
    else:
        if backendExists:
            # Get properties
            try:
                backendProperties = client.get(
                    '/ip/loadBalancing/{0}/backend/{1}'.format(name, backend))
            except APIError as apiError:
                module.fail_json(
                    msg='Unable to call OVH api for getting the backend properties, '
                        'check application key, secret, consumerkey and '
                        'parameters. Error returned by OVH api was : {0}'
                    .format(apiError))

            if backendProperties['weight'] != weight:
                # Change weight
                try:
                    client.post(
                        '/ip/loadBalancing/{0}/backend/{1}/setWeight'
                        .format(name, backend), weight=weight)
                    if not waitForNoTask(client, name, timeout):
                        module.fail_json(
                            msg='Timeout of {0} seconds while waiting for completion '
                                'of setWeight to backend task'
                            .format(timeout))
                except APIError as apiError:
                    module.fail_json(
                        msg='Unable to call OVH api for updating the weight of the '
                            'backend, check application key, secret, consumerkey '
                            'and parameters. Error returned by OVH api was : {0}'
                        .format(apiError))
                moduleChanged = True

            if backendProperties['probe'] != probe:
                # Change probe. The new value travels in the PUT call; the
                # local dict mutation the original did was a dead store.
                try:
                    client.put(
                        '/ip/loadBalancing/{0}/backend/{1}'
                        .format(name, backend), probe=probe)
                    if not waitForNoTask(client, name, timeout):
                        module.fail_json(
                            msg='Timeout of {0} seconds while waiting for completion of '
                                'setProbe to backend task'
                            .format(timeout))
                except APIError as apiError:
                    # BUGFIX: 'propbe' typo corrected in the error message.
                    module.fail_json(
                        msg='Unable to call OVH api for updating the probe of '
                            'the backend, check application key, secret, '
                            'consumerkey and parameters. Error returned by OVH api '
                            'was : {0}'
                        .format(apiError))
                moduleChanged = True

        else:
            # Creates backend
            try:
                try:
                    client.post('/ip/loadBalancing/{0}/backend'.format(name),
                                ipBackend=backend, probe=probe, weight=weight)
                except APIError as apiError:
                    module.fail_json(
                        msg='Unable to call OVH api for creating the backend, check '
                            'application key, secret, consumerkey and parameters. '
                            'Error returned by OVH api was : {0}'
                        .format(apiError))

                if not waitForNoTask(client, name, timeout):
                    module.fail_json(
                        msg='Timeout of {0} seconds while waiting for completion of '
                            'backend creation task'.format(timeout))
            except APIError as apiError:
                module.fail_json(
                    msg='Unable to call OVH api for creating the backend, check '
                        'application key, secret, consumerkey and parameters. '
                        'Error returned by OVH api was : {0}'.format(apiError))
            moduleChanged = True

    module.exit_json(changed=moduleChanged)
+
+# import module snippets
+from ansible.module_utils.basic import AnsibleModule
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/__init__.py b/lib/ansible/modules/cloud/ovirt/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/__init__.py
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_affinity_labels.py b/lib/ansible/modules/cloud/ovirt/ovirt_affinity_labels.py
new file mode 100644
index 0000000000..5a680f9297
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_affinity_labels.py
@@ -0,0 +1,207 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from collections import defaultdict
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ ovirt_full_argument_spec,
+)
+
+
# 'status' is a list of status tags; keep it consistent with the other
# oVirt modules in this series (e.g. ovirt_auth uses ['preview']).
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_affinity_labels
+short_description: Module to affinity labels in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "This module manage affinity labels in oVirt. It can also manage assignments
+ of those labels to hosts and VMs."
+options:
+ name:
+ description:
+ - "Name of the the affinity label to manage."
+ required: true
+ state:
+ description:
+ - "Should the affinity label be present or absent."
+ choices: ['present', 'absent']
+ default: present
+ cluster:
+ description:
+ - "Name of the cluster where vms and hosts resides."
+ vms:
+ description:
+ - "List of the VMs names, which should have assigned this affinity label."
+ hosts:
+ description:
+ - "List of the hosts names, which should have assigned this affinity label."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create(if not exists) and assign affinity label to vms vm1 and vm2 and host host1
+- ovirt_affinity_labels:
+ name: mylabel
+ cluster: mycluster
+ vms:
+ - vm1
+ - vm2
+ hosts:
+ - host1
+
+# To detach all VMs from label
+- ovirt_affinity_labels:
+ name: mylabel
+ cluster: mycluster
+ vms: []
+
+# Remove affinity label
+- ovirt_affinity_labels:
+ state: absent
+ name: mylabel
+'''
+
RETURN = '''
id:
    description: ID of the affinity label which is managed
    returned: On success if affinity label is found.
    type: str
    sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
affinity_label:
    description: "Dictionary of all the affinity label attributes. Affinity label attributes can be found on your oVirt instance
        at following url: https://ovirt.example.com/ovirt-engine/api/model#types/affinity_label."
    returned: On success if affinity label is found.
'''
+
+
class AffinityLabelsModule(BaseModule):
    """Manages an oVirt affinity label and its assignments to VMs and hosts."""

    def build_entity(self):
        # Only the name is needed to create or identify the label itself;
        # VM/host assignments are handled separately in update_check().
        return otypes.AffinityLabel(name=self._module.params['name'])

    def post_create(self, entity):
        # Right after the label is created, sync its VM/host assignments.
        self.update_check(entity)

    def pre_remove(self, entity):
        # Detach the label from every VM and host before removing it, by
        # forcing the desired assignment lists to empty and re-syncing.
        self._module.params['vms'] = []
        self._module.params['hosts'] = []
        self.update_check(entity)

    def _update_label_assignments(self, entity, name, label_obj_type):
        # `name` is 'vms' or 'hosts'; resolve the matching top-level SDK
        # service (vms_service / hosts_service) dynamically.
        objs_service = getattr(self._connection.system_service(), '%s_service' % name)()
        # A parameter of None means "don't touch assignments of this kind";
        # an empty list means "detach from everything".
        if self._module.params[name] is not None:
            # Build a map of currently-labeled object name -> list of IDs,
            # optionally restricted to the requested cluster.
            objs = self._connection.follow_link(getattr(entity, name))
            objs_names = defaultdict(list)
            for obj in objs:
                labeled_entity = objs_service.service(obj.id).get()
                if self._module.params['cluster'] is None:
                    objs_names[labeled_entity.name].append(obj.id)
                elif self._connection.follow_link(labeled_entity.cluster).name == self._module.params['cluster']:
                    objs_names[labeled_entity.name].append(obj.id)

            # Attach the label to requested objects that don't carry it yet.
            for obj in self._module.params[name]:
                if obj not in objs_names:
                    for obj_id in objs_service.list(
                        search='name=%s and cluster=%s' % (obj, self._module.params['cluster'])
                    ):
                        label_service = getattr(self._service.service(entity.id), '%s_service' % name)()
                        if not self._module.check_mode:
                            # name[:-1] turns 'vms'/'hosts' into the singular
                            # keyword argument 'vm'/'host' expected by add().
                            label_service.add(**{
                                name[:-1]: label_obj_type(id=obj_id.id)
                            })
                        self.changed = True

            # Detach the label from labeled objects not in the requested list.
            for obj in objs_names:
                if obj not in self._module.params[name]:
                    label_service = getattr(self._service.service(entity.id), '%s_service' % name)()
                    if not self._module.check_mode:
                        for obj_id in objs_names[obj]:
                            label_service.service(obj_id).remove()
                    self.changed = True

    def update_check(self, entity):
        # Sync both assignment kinds; changes are reported via self.changed.
        # NOTE(review): returning True presumably tells BaseModule the entity
        # itself needs no update — confirm against BaseModule.update_check.
        self._update_label_assignments(entity, 'vms', otypes.Vm)
        self._update_label_assignments(entity, 'hosts', otypes.Host)
        return True
+
+
def main():
    """Create/remove an affinity label and sync its VM/host assignments.

    Entry point of the ovirt_affinity_labels module: builds the argument
    spec, authenticates, delegates to AffinityLabelsModule, and always
    closes the connection without revoking the SSO token.
    """
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        cluster=dict(default=None),
        name=dict(default=None, required=True),
        vms=dict(default=None, type='list'),
        hosts=dict(default=None, type='list'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=[
            # The cluster is needed to resolve VM/host names on assignment.
            ('state', 'present', ['cluster']),
        ],
    )
    check_sdk(module)

    # Initialize before the try block so the finally clause cannot hit a
    # NameError (masking the real failure) when create_connection() raises.
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        affinity_labels_service = connection.system_service().affinity_labels_service()
        affinity_labels_module = AffinityLabelsModule(
            connection=connection,
            module=module,
            service=affinity_labels_service,
        )

        state = module.params['state']
        if state == 'present':
            ret = affinity_labels_module.create()
        elif state == 'absent':
            ret = affinity_labels_module.remove()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Close without revoking the token: it may be reused by later tasks.
        if connection is not None:
            connection.close(logout=False)


if __name__ == "__main__":
    main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_affinity_labels_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_affinity_labels_facts.py
new file mode 100644
index 0000000000..0708b7d880
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_affinity_labels_facts.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_full_argument_spec,
+)
+
+
# 'status' is a list of status tags; keep it consistent with the other
# oVirt modules in this series (e.g. ovirt_auth uses ['preview']).
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_affinity_labels_facts
+short_description: Retrieve facts about one or more oVirt affinity labels
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+ - "Retrieve facts about one or more oVirt affinity labels."
+notes:
+ - "This module creates a new top-level C(affinity_labels) fact, which
+ contains a list of affinity labels."
+options:
+ name:
+ description:
+ - "Name of the affinity labels which should be listed."
+ vm:
+ description:
+ - "Name of the VM, which affinity labels should be listed."
+ host:
+ description:
+ - "Name of the host, which affinity labels should be listed."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all affinity labels, which names start with C(label):
+- ovirt_affinity_labels_facts:
+ name: label*
+- debug:
+ var: affinity_labels
+
+# Gather facts about all affinity labels, which are assigned to VMs
+# which names start with C(postgres):
+- ovirt_affinity_labels_facts:
+ vm: postgres*
+- debug:
+ var: affinity_labels
+
+# Gather facts about all affinity labels, which are assigned to hosts
+# which names start with C(west):
+- ovirt_affinity_labels_facts:
+ host: west*
+- debug:
+ var: affinity_labels
+
+# Gather facts about all affinity labels, which are assigned to hosts
+# which names start with C(west) or VMs which names start with C(postgres):
+- ovirt_affinity_labels_facts:
+ host: west*
+ vm: postgres*
+- debug:
+ var: affinity_labels
+'''
+
RETURN = '''
affinity_labels:
    description: "List of dictionaries describing the affinity labels. Affinity label attributes are mapped to dictionary keys,
        all affinity labels attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/affinity_label."
    returned: On success.
    type: list
'''
+
+
def main():
    """Gather facts about oVirt affinity labels, optionally filtered.

    Filters (`name`, `vm`, `host`) accept shell-style wildcards and are
    OR-combined: a label matching any given filter is included. With no
    filter, all labels are returned. Results are published as the
    `affinity_labels` Ansible fact.
    """
    argument_spec = ovirt_full_argument_spec(
        name=dict(default=None),
        host=dict(default=None),
        vm=dict(default=None),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    # Initialize before the try block so the finally clause cannot hit a
    # NameError (masking the real failure) when create_connection() raises.
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        affinity_labels_service = connection.system_service().affinity_labels_service()
        labels = []
        all_labels = affinity_labels_service.list()
        if module.params['name']:
            # fnmatch gives shell-style wildcard matching (e.g. 'label*').
            labels.extend(
                label for label in all_labels
                if fnmatch.fnmatch(label.name, module.params['name'])
            )
        if module.params['host']:
            hosts_service = connection.system_service().hosts_service()
            labels.extend(
                label
                for label in all_labels
                for host in connection.follow_link(label.hosts)
                if fnmatch.fnmatch(hosts_service.service(host.id).get().name, module.params['host'])
            )
        if module.params['vm']:
            vms_service = connection.system_service().vms_service()
            labels.extend(
                label
                for label in all_labels
                for vm in connection.follow_link(label.vms)
                if fnmatch.fnmatch(vms_service.service(vm.id).get().name, module.params['vm'])
            )

        # No filter at all -> report every label.
        if not (module.params['vm'] or module.params['host'] or module.params['name']):
            labels = all_labels

        module.exit_json(
            changed=False,
            ansible_facts=dict(
                affinity_labels=[
                    get_dict_of_struct(label) for label in labels
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Close without revoking the token: it may be reused by later tasks.
        if connection is not None:
            connection.close(logout=False)


if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_auth.py b/lib/ansible/modules/cloud/ovirt/ovirt_auth.py
new file mode 100644
index 0000000000..6f43fe8d02
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_auth.py
@@ -0,0 +1,234 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+ import ovirtsdk4 as sdk
+except ImportError:
+ pass
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_auth
+short_description: "Module to manage authentication to oVirt."
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.2"
+description:
+ - "This module authenticates to oVirt engine and creates SSO token, which should be later used in
+ all other oVirt modules, so all modules don't need to perform login and logout.
+ This module returns an Ansible fact called I(ovirt_auth). Every module can use this
+ fact as C(auth) parameter, to perform authentication."
+options:
+ state:
+ default: present
+ choices: ['present', 'absent']
+ description:
+ - "Specifies if a token should be created or revoked."
+ username:
+ required: True
+ description:
+ - "The name of the user. For example: I(admin@internal)."
+ password:
+ required: True
+ description:
+ - "The password of the user."
+ url:
+ required: True
+ description:
+ - "A string containing the base URL of the server.
+ For example: I(https://server.example.com/ovirt-engine/api)."
+ insecure:
+ required: False
+ description:
+ - "A boolean flag that indicates if the server TLS certificate and host name should be checked."
+ ca_file:
+ required: False
+ description:
+ - "A PEM file containing the trusted CA certificates. The
+ certificate presented by the server will be verified using these CA
+ certificates. If C(ca_file) parameter is not set, system wide
+ CA certificate store is used."
+ timeout:
+ required: False
+ description:
+ - "The maximum total time to wait for the response, in
+ seconds. A value of zero (the default) means wait forever. If
+ the timeout expires before the response is received an exception
+ will be raised."
+ compress:
+ required: False
+ description:
+ - "A boolean flag indicating if the SDK should ask
+ the server to send compressed responses. The default is I(True).
+ Note that this is a hint for the server, and that it may return
+ uncompressed data even when this parameter is set to I(True)."
+ kerberos:
+ required: False
+ description:
+ - "A boolean flag indicating if Kerberos authentication
+ should be used instead of the default basic authentication."
+notes:
+ - "Everytime you use ovirt_auth module to obtain ticket, you need to also revoke the ticket,
+ when you no longer need it, otherwise the ticket would be revoked by engine when it expires.
+ For an example of how to achieve that, please take a look at I(examples) section."
+'''
+
+EXAMPLES = '''
+tasks:
+ - block:
+ # Create a vault with `ovirt_password` variable which store your
+ # oVirt user's password, and include that yaml file with variable:
+ - include_vars: ovirt_password.yml
+
+ - name: Obtain SSO token with using username/password credentials:
+ ovirt_auth:
+ url: https://ovirt.example.com/ovirt-engine/api
+ username: admin@internal
+ ca_file: ca.pem
+ password: "{{ ovirt_password }}"
+
+ # Previous task generated I(ovirt_auth) fact, which you can later use
+ # in different modules as follows:
+ - ovirt_vms:
+ auth: "{{ ovirt_auth }}"
+ state: absent
+ name: myvm
+
+ always:
+ - name: Always revoke the SSO token
+ ovirt_auth:
+ state: absent
+ ovirt_auth: "{{ ovirt_auth }}"
+'''
+
+RETURN = '''
+ovirt_auth:
+ description: Authentication facts, needed to perform authentication to oVirt.
+ returned: success
+ type: dictionary
+ contains:
+ token:
+ description: SSO token which is used for connection to oVirt engine.
+ returned: success
+ type: string
+ sample: "kdfVWp9ZgeewBXV-iq3Js1-xQJZPSEQ334FLb3eksoEPRaab07DhZ8ED8ghz9lJd-MQ2GqtRIeqhvhCkrUWQPw"
+ url:
+ description: URL of the oVirt engine API endpoint.
+ returned: success
+ type: string
+ sample: "https://ovirt.example.com/ovirt-engine/api"
+ ca_file:
+ description: CA file, which is used to verify SSL/TLS connection.
+ returned: success
+ type: string
+ sample: "ca.pem"
+ insecure:
+ description: Flag indicating if insecure connection is used.
+ returned: success
+ type: bool
+ sample: False
+ timeout:
+ description: Number of seconds to wait for response.
+ returned: success
+ type: int
+ sample: 0
+ compress:
+ description: Flag indicating if compression is used for connection.
+ returned: success
+ type: bool
+ sample: True
+ kerberos:
+ description: Flag indicating if kerberos is used for authentication.
+ returned: success
+ type: bool
+ sample: False
+'''
+
+
def main():
    """Create (state=present) or revoke (state=absent) an oVirt SSO token.

    On success, publishes the `ovirt_auth` Ansible fact containing the
    token and connection parameters, to be passed as `auth` to the other
    oVirt modules.
    """
    module = AnsibleModule(
        argument_spec=dict(
            url=dict(default=None),
            username=dict(default=None),
            password=dict(default=None, no_log=True),
            ca_file=dict(default=None, type='path'),
            insecure=dict(required=False, type='bool', default=False),
            timeout=dict(required=False, type='int', default=0),
            compress=dict(required=False, type='bool', default=True),
            kerberos=dict(required=False, type='bool', default=False),
            state=dict(default='present', choices=['present', 'absent']),
            # Fix: `required` takes a boolean, not None.
            ovirt_auth=dict(required=False, type='dict'),
        ),
        required_if=[
            ('state', 'absent', ['ovirt_auth']),
            ('state', 'present', ['username', 'password', 'url']),
        ],
    )
    check_sdk(module)

    state = module.params.get('state')
    if state == 'present':
        params = module.params
    elif state == 'absent':
        # When revoking, connection details (including the token) come from
        # the previously returned `ovirt_auth` fact.
        params = module.params['ovirt_auth']

    connection = sdk.Connection(
        url=params.get('url'),
        username=params.get('username'),
        password=params.get('password'),
        ca_file=params.get('ca_file'),
        insecure=params.get('insecure'),
        timeout=params.get('timeout'),
        compress=params.get('compress'),
        kerberos=params.get('kerberos'),
        token=params.get('token'),
    )
    try:
        token = connection.authenticate()
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_auth=dict(
                    token=token,
                    url=params.get('url'),
                    ca_file=params.get('ca_file'),
                    insecure=params.get('insecure'),
                    timeout=params.get('timeout'),
                    compress=params.get('compress'),
                    kerberos=params.get('kerberos'),
                ) if state == 'present' else dict()
            )
        )
    except Exception as e:
        module.fail_json(msg="Error: %s" % e)
    finally:
        # Close the connection; revoke the token only when state is 'absent'.
        connection.close(logout=state == 'absent')
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ovirt import *
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_clusters.py b/lib/ansible/modules/cloud/ovirt/ovirt_clusters.py
new file mode 100644
index 0000000000..c40ffcddd8
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_clusters.py
@@ -0,0 +1,564 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_clusters
+short_description: Module to manage clusters in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage clusters in oVirt"
+options:
+ name:
+ description:
+ - "Name of the the cluster to manage."
+ required: true
+ state:
+ description:
+ - "Should the cluster be present or absent"
+ choices: ['present', 'absent']
+ default: present
+ datacenter:
+ description:
+ - "Datacenter name where cluster reside."
+ description:
+ description:
+ - "Description of the cluster."
+ comment:
+ description:
+ - "Comment of the cluster."
+ network:
+ description:
+ - "Management network of cluster to access cluster hosts."
+ ballooning:
+ description:
+ - "If (True) enable memory balloon optimization. Memory balloon is used to
+ re-distribute / reclaim the host memory based on VM needs
+ in a dynamic way."
+ virt:
+ description:
+ - "If (True), hosts in this cluster will be used to run virtual machines."
+ gluster:
+ description:
+ - "If (True), hosts in this cluster will be used as Gluster Storage
+ server nodes, and not for running virtual machines."
+ - "By default the cluster is created for virtual machine hosts."
+ threads_as_cores:
+ description:
+ - "If (True) the exposed host threads would be treated as cores
+ which can be utilized by virtual machines."
+ ksm:
+ description:
+ - "I (True) MoM enables to run Kernel Same-page Merging (KSM) when
+ necessary and when it can yield a memory saving benefit that
+ outweighs its CPU cost."
+ ksm_numa:
+ description:
+ - "If (True) enables KSM C(ksm) for best berformance inside NUMA nodes."
+ ha_reservation:
+ description:
+ - "If (True) enable the oVirt to monitor cluster capacity for highly
+ available virtual machines."
+ trusted_service:
+ description:
+ - "If (True) enable integration with an OpenAttestation server."
+ vm_reason:
+ description:
+ - "If (True) enable an optional reason field when a virtual machine
+ is shut down from the Manager, allowing the administrator to
+ provide an explanation for the maintenance."
+ host_reason:
+ description:
+ - "If (True) enable an optional reason field when a host is placed
+ into maintenance mode from the Manager, allowing the administrator
+ to provide an explanation for the maintenance."
+ memory_policy:
+ description:
+ - "I(disabled) - Disables memory page sharing."
+ - "I(server) - Sets the memory page sharing threshold to 150% of the system memory on each host."
+ - "I(desktop) - Sets the memory page sharing threshold to 200% of the system memory on each host."
+ choices: ['disabled', 'server', 'desktop']
+ rng_sources:
+ description:
+ - "List that specify the random number generator devices that all hosts in the cluster will use."
+ - "Supported generators are: I(hwrng) and I(random)."
+ spice_proxy:
+ description:
+ - "The proxy by which the SPICE client will connect to virtual machines."
+ - "The address must be in the following format: I(protocol://[host]:[port])"
+ fence_enabled:
+ description:
+ - "If (True) enables fencing on the cluster."
+ - "Fencing is enabled by default."
+ fence_skip_if_sd_active:
+ description:
+ - "If (True) any hosts in the cluster that are Non Responsive
+ and still connected to storage will not be fenced."
+ fence_skip_if_connectivity_broken:
+ description:
+ - "If (True) fencing will be temporarily disabled if the percentage
+ of hosts in the cluster that are experiencing connectivity issues
+ is greater than or equal to the defined threshold."
+ - "The threshold can be specified by C(fence_connectivity_threshold)."
+ fence_connectivity_threshold:
+ description:
+ - "The threshold used by C(fence_skip_if_connectivity_broken)."
+ resilience_policy:
+ description:
+ - "The resilience policy defines how the virtual machines are prioritized in the migration."
+ - "Following values are supported:"
+ - "C(do_not_migrate) - Prevents virtual machines from being migrated. "
+ - "C(migrate) - Migrates all virtual machines in order of their defined priority."
+ - "C(migrate_highly_available) - Migrates only highly available virtual machines to prevent overloading other hosts."
+ choices: ['do_not_migrate', 'migrate', 'migrate_highly_available']
+ migration_bandwidth:
+ description:
+ - "The bandwidth settings define the maximum bandwidth of both outgoing and incoming migrations per host."
+ - "Following bandwith options are supported:"
+ - "C(auto) - Bandwidth is copied from the I(rate limit) [Mbps] setting in the data center host network QoS."
+ - "C(hypervisor_default) - Bandwidth is controlled by local VDSM setting on sending host."
+ - "C(custom) - Defined by user (in Mbps)."
+ choices: ['auto', 'hypervisor_default', 'custom']
+ migration_bandwidth_limit:
+ description:
+ - "Set the I(custom) migration bandwidth limit."
+ - "This parameter is used only when C(migration_bandwidth) is I(custom)."
+ migration_auto_converge:
+ description:
+ - "If (True) auto-convergence is used during live migration of virtual machines."
+ - "Used only when C(migration_policy) is set to I(legacy)."
+ - "Following options are supported:"
+ - "C(true) - Override the global setting to I(true)."
+ - "C(false) - Override the global setting to I(false)."
+ - "C(inherit) - Use value which is set globally."
+ choices: ['true', 'false', 'inherit']
+ migration_compressed:
+ description:
+ - "If (True) compression is used during live migration of the virtual machine."
+ - "Used only when C(migration_policy) is set to I(legacy)."
+ - "Following options are supported:"
+ - "C(true) - Override the global setting to I(true)."
+ - "C(false) - Override the global setting to I(false)."
+ - "C(inherit) - Use value which is set globally."
+ choices: ['true', 'false', 'inherit']
+ migration_policy:
+ description:
+ - "A migration policy defines the conditions for live migrating
+ virtual machines in the event of host failure."
+ - "Following policies are supported:"
+ - "C(legacy) - Legacy behavior of 3.6 version."
+ - "C(minimal_downtime) - Virtual machines should not experience any significant downtime."
+ - "C(suspend_workload) - Virtual machines may experience a more significant downtime."
+ choices: ['legacy', 'minimal_downtime', 'suspend_workload']
+ serial_policy:
+ description:
+ - "Specify a serial number policy for the virtual machines in the cluster."
+ - "Following options are supported:"
+ - "C(vm) - Sets the virtual machine's UUID as its serial number."
+ - "C(host) - Sets the host's UUID as the virtual machine's serial number."
+ - "C(custom) - Allows you to specify a custom serial number in C(serial_policy_value)."
+ serial_policy_value:
+ description:
+ - "Allows you to specify a custom serial number."
+ - "This parameter is used only when C(serial_policy) is I(custom)."
+ scheduling_policy:
+ description:
+ - "Name of the scheduling policy to be used for cluster."
+ cpu_arch:
+ description:
+ - "CPU architecture of cluster."
+ choices: ['x86_64', 'ppc64', 'undefined']
+ cpu_type:
+ description:
+ - "CPU codename. For example I(Intel SandyBridge Family)."
+ switch_type:
+ description:
+ - "Type of switch to be used by all networks in given cluster.
+ Either I(legacy) which is using linux brigde or I(ovs) using
+ Open vSwitch."
+ choices: ['legacy', 'ovs']
+ compatibility_version:
+ description:
+ - "The compatibility version of the cluster. All hosts in this
+ cluster must support at least this compatibility version."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create cluster
+- ovirt_clusters:
+ datacenter: mydatacenter
+ name: mycluster
+ cpu_type: Intel SandyBridge Family
+ description: mycluster
+ compatibility_version: 4.0
+
+# Create virt service cluster:
+- ovirt_clusters:
+ datacenter: mydatacenter
+ name: mycluster
+ cpu_type: Intel Nehalem Family
+ description: mycluster
+ switch_type: legacy
+ compatibility_version: 4.0
+ ballooning: true
+ gluster: false
+ threads_as_cores: true
+ ha_reservation: true
+ trusted_service: false
+ host_reason: false
+ vm_reason: true
+ ksm_numa: true
+ memory_policy: server
+ rng_sources:
+ - hwrng
+ - random
+
+# Remove cluster
+- ovirt_clusters:
+ state: absent
+ name: mycluster
+'''
+
+RETURN = '''
+id:
+ description: ID of the cluster which is managed
+ returned: On success if cluster is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+cluster:
+ description: "Dictionary of all the cluster attributes. Cluster attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/cluster."
+ returned: On success if cluster is found.
+'''
+
+
+class ClustersModule(BaseModule):
+
+ def __get_major(self, full_version):
+ if full_version is None:
+ return None
+ if isinstance(full_version, otypes.Version):
+ return full_version.major
+ return int(full_version.split('.')[0])
+
+ def __get_minor(self, full_version):
+ if full_version is None:
+ return None
+ if isinstance(full_version, otypes.Version):
+ return full_version.minor
+ return int(full_version.split('.')[1])
+
+ def param(self, name, default=None):
+ return self._module.params.get(name, default)
+
+ def _get_memory_policy(self):
+ memory_policy = self.param('memory_policy')
+ if memory_policy == 'desktop':
+ return 200
+ elif memory_policy == 'server':
+ return 150
+ elif memory_policy == 'disabled':
+ return 100
+
+ def _get_policy_id(self):
+ # These are hardcoded IDs, once there is API, please fix this.
+ # legacy - 00000000-0000-0000-0000-000000000000
+ # minimal downtime - 80554327-0569-496b-bdeb-fcbbf52b827b
+ # suspend workload if needed - 80554327-0569-496b-bdeb-fcbbf52b827c
+ migration_policy = self.param('migration_policy')
+ if migration_policy == 'legacy':
+ return '00000000-0000-0000-0000-000000000000'
+ elif migration_policy == 'minimal_downtime':
+ return '80554327-0569-496b-bdeb-fcbbf52b827b'
+ elif migration_policy == 'suspend_workload':
+ return '80554327-0569-496b-bdeb-fcbbf52b827c'
+
+ def _get_sched_policy(self):
+ sched_policy = None
+ if self.param('serial_policy'):
+ sched_policies_service = self._connection.system_service().scheduling_policies_service()
+ sched_policy = search_by_name(sched_policies_service, self.param('scheduling_policy'))
+ if not sched_policy:
+ raise Exception("Scheduling policy '%s' was not found" % self.param('scheduling_policy'))
+
+ return sched_policy
+
+ def build_entity(self):
+ sched_policy = self._get_sched_policy()
+ return otypes.Cluster(
+ name=self.param('name'),
+ comment=self.param('comment'),
+ description=self.param('description'),
+ ballooning_enabled=self.param('ballooning'),
+ gluster_service=self.param('gluster'),
+ virt_service=self.param('virt'),
+ threads_as_cores=self.param('threads_as_cores'),
+ ha_reservation=self.param('ha_reservation'),
+ trusted_service=self.param('trusted_service'),
+ optional_reason=self.param('vm_reason'),
+ maintenance_reason_required=self.param('host_reason'),
+ scheduling_policy=otypes.SchedulingPolicy(
+ id=sched_policy.id,
+ ) if sched_policy else None,
+ serial_number=otypes.SerialNumber(
+ policy=otypes.SerialNumberPolicy(self.param('serial_policy')),
+ value=self.param('serial_policy_value'),
+ ) if (
+ self.param('serial_policy') is not None or
+ self.param('serial_policy_value') is not None
+ ) else None,
+ migration=otypes.MigrationOptions(
+ auto_converge=otypes.InheritableBoolean(
+ self.param('migration_auto_converge'),
+ ) if self.param('migration_auto_converge') else None,
+ bandwidth=otypes.MigrationBandwidth(
+ assignment_method=otypes.MigrationBandwidthAssignmentMethod(
+ self.param('migration_bandwidth'),
+ ) if self.param('migration_bandwidth') else None,
+ custom_value=self.param('migration_bandwidth_limit'),
+ ) if (
+ self.param('migration_bandwidth') or
+ self.param('migration_bandwidth_limit')
+ ) else None,
+ compressed=otypes.InheritableBoolean(
+ self.param('migration_compressed'),
+ ) if self.param('migration_compressed') else None,
+ policy=otypes.MigrationPolicy(
+ id=self._get_policy_id()
+ ) if self.param('migration_policy') else None,
+ ) if (
+ self.param('migration_bandwidth') is not None or
+ self.param('migration_bandwidth_limit') is not None or
+ self.param('migration_auto_converge') is not None or
+ self.param('migration_compressed') is not None or
+ self.param('migration_policy') is not None
+ ) else None,
+ error_handling=otypes.ErrorHandling(
+ on_error=otypes.MigrateOnError(
+ self.param('resilience_policy')
+ ),
+ ) if self.param('resilience_policy') else None,
+ fencing_policy=otypes.FencingPolicy(
+ enabled=(
+ self.param('fence_enabled') or
+ self.param('fence_skip_if_connectivity_broken') or
+ self.param('fence_skip_if_sd_active')
+ ),
+ skip_if_connectivity_broken=otypes.SkipIfConnectivityBroken(
+ enabled=self.param('fence_skip_if_connectivity_broken'),
+ threshold=self.param('fence_connectivity_threshold'),
+ ) if (
+ self.param('fence_skip_if_connectivity_broken') is not None or
+ self.param('fence_connectivity_threshold') is not None
+ ) else None,
+ skip_if_sd_active=otypes.SkipIfSdActive(
+ enabled=self.param('fence_skip_if_sd_active'),
+ ) if self.param('fence_skip_if_sd_active') else None,
+ ) if (
+ self.param('fence_enabled') is not None or
+ self.param('fence_skip_if_sd_active') is not None or
+ self.param('fence_skip_if_connectivity_broken') is not None or
+ self.param('fence_connectivity_threshold') is not None
+ ) else None,
+ display=otypes.Display(
+ proxy=self.param('spice_proxy'),
+ ) if self.param('spice_proxy') else None,
+ required_rng_sources=[
+ otypes.RngSource(rng) for rng in self.param('rng_sources')
+ ] if self.param('rng_sources') else None,
+ memory_policy=otypes.MemoryPolicy(
+ over_commit=otypes.MemoryOverCommit(
+ percent=self._get_memory_policy(),
+ ),
+ ) if self.param('memory_policy') else None,
+ ksm=otypes.Ksm(
+ enabled=self.param('ksm') or self.param('ksm_numa'),
+ merge_across_nodes=not self.param('ksm_numa'),
+ ) if (
+ self.param('ksm_numa') is not None or
+ self.param('ksm') is not None
+ ) else None,
+ data_center=otypes.DataCenter(
+ name=self.param('datacenter'),
+ ) if self.param('datacenter') else None,
+ management_network=otypes.Network(
+ name=self.param('network'),
+ ) if self.param('network') else None,
+ cpu=otypes.Cpu(
+ architecture=self.param('cpu_arch'),
+ type=self.param('cpu_type'),
+ ) if (
+ self.param('cpu_arch') or self.param('cpu_type')
+ ) else None,
+ version=otypes.Version(
+ major=self.__get_major(self.param('compatibility_version')),
+ minor=self.__get_minor(self.param('compatibility_version')),
+ ) if self.param('compatibility_version') else None,
+ switch_type=otypes.SwitchType(
+ self.param('switch_type')
+ ) if self.param('switch_type') else None,
+ )
+
+ def update_check(self, entity):
+ return (
+ equal(self.param('comment'), entity.comment) and
+ equal(self.param('description'), entity.description) and
+ equal(self.param('switch_type'), str(entity.switch_type)) and
+ equal(self.param('cpu_arch'), str(entity.cpu.architecture)) and
+ equal(self.param('cpu_type'), entity.cpu.type) and
+ equal(self.param('ballooning'), entity.ballooning_enabled) and
+ equal(self.param('gluster'), entity.gluster_service) and
+ equal(self.param('virt'), entity.virt_service) and
+ equal(self.param('threads_as_cores'), entity.threads_as_cores) and
+ equal(self.param('ksm_numa'), not entity.ksm.merge_across_nodes and entity.ksm.enabled) and
+ equal(self.param('ksm'), entity.ksm.merge_across_nodes and entity.ksm.enabled) and
+ equal(self.param('ha_reservation'), entity.ha_reservation) and
+ equal(self.param('trusted_service'), entity.trusted_service) and
+ equal(self.param('host_reason'), entity.maintenance_reason_required) and
+ equal(self.param('vm_reason'), entity.optional_reason) and
+ equal(self.param('spice_proxy'), getattr(entity.display, 'proxy', None)) and
+ equal(self.param('fence_enabled'), entity.fencing_policy.enabled) and
+ equal(self.param('fence_skip_if_sd_active'), entity.fencing_policy.skip_if_sd_active.enabled) and
+ equal(self.param('fence_skip_if_connectivity_broken'), entity.fencing_policy.skip_if_connectivity_broken.enabled) and
+ equal(self.param('fence_connectivity_threshold'), entity.fencing_policy.skip_if_connectivity_broken.threshold) and
+ equal(self.param('resilience_policy'), str(entity.error_handling.on_error)) and
+ equal(self.param('migration_bandwidth'), str(entity.migration.bandwidth.assignment_method)) and
+ equal(self.param('migration_auto_converge'), str(entity.migration.auto_converge)) and
+ equal(self.param('migration_compressed'), str(entity.migration.compressed)) and
+ equal(self.param('serial_policy'), str(entity.serial_number.policy)) and
+ equal(self.param('serial_policy_value'), entity.serial_number.value) and
+ equal(self.param('scheduling_policy'), self._get_sched_policy().name) and
+ equal(self._get_policy_id(), entity.migration.policy.id) and
+ equal(self._get_memory_policy(), entity.memory_policy.over_commit.percent) and
+ equal(self.__get_minor(self.param('compatibility_version')), self.__get_minor(entity.version)) and
+ equal(self.__get_major(self.param('compatibility_version')), self.__get_major(entity.version)) and
+ equal(
+ self.param('migration_bandwidth_limit') if self.param('migration_bandwidth') == 'custom' else None,
+ entity.migration.bandwidth.custom_value
+ ) and
+ equal(
+ sorted(self.param('rng_sources')) if self.param('rng_sources') else None,
+ sorted([
+ str(source) for source in entity.required_rng_sources
+ ])
+ )
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(default=None, required=True),
+ ballooning=dict(default=None, type='bool', aliases=['balloon']),
+ gluster=dict(default=None, type='bool'),
+ virt=dict(default=None, type='bool'),
+ threads_as_cores=dict(default=None, type='bool'),
+ ksm_numa=dict(default=None, type='bool'),
+ ksm=dict(default=None, type='bool'),
+ ha_reservation=dict(default=None, type='bool'),
+ trusted_service=dict(default=None, type='bool'),
+ vm_reason=dict(default=None, type='bool'),
+ host_reason=dict(default=None, type='bool'),
+ memory_policy=dict(default=None, choices=['disabled', 'server', 'desktop']),
+ rng_sources=dict(default=None, type='list'),
+ spice_proxy=dict(default=None),
+ fence_enabled=dict(default=None, type='bool'),
+ fence_skip_if_sd_active=dict(default=None, type='bool'),
+ fence_skip_if_connectivity_broken=dict(default=None, type='bool'),
+ fence_connectivity_threshold=dict(default=None, type='int'),
+ resilience_policy=dict(default=None, choices=['migrate_highly_available', 'migrate', 'do_not_migrate']),
+ migration_bandwidth=dict(default=None, choices=['auto', 'hypervisor_default', 'custom']),
+ migration_bandwidth_limit=dict(default=None, type='int'),
+ migration_auto_converge=dict(default=None, choices=['true', 'false', 'inherit']),
+ migration_compressed=dict(default=None, choices=['true', 'false', 'inherit']),
+ migration_policy=dict(default=None, choices=['legacy', 'minimal_downtime', 'suspend_workload']),
+ serial_policy=dict(default=None, choices=['vm', 'host', 'custom']),
+ serial_policy_value=dict(default=None),
+ scheduling_policy=dict(default=None),
+ datacenter=dict(default=None),
+ description=dict(default=None),
+ comment=dict(default=None),
+ network=dict(default=None),
+ cpu_arch=dict(default=None, choices=['ppc64', 'undefined', 'x86_64']),
+ cpu_type=dict(default=None),
+ switch_type=dict(default=None, choices=['legacy', 'ovs']),
+ compatibility_version=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+
+ try:
+ connection = create_connection(module.params.pop('auth'))
+ clusters_service = connection.system_service().clusters_service()
+ clusters_module = ClustersModule(
+ connection=connection,
+ module=module,
+ service=clusters_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = clusters_module.create()
+ elif state == 'absent':
+ ret = clusters_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_clusters_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_clusters_facts.py
new file mode 100644
index 0000000000..edcf680bee
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_clusters_facts.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_clusters_facts
+short_description: Retrieve facts about one or more oVirt clusters
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+ - "Retrieve facts about one or more oVirt clusters."
+notes:
+ - "This module creates a new top-level C(ovirt_clusters) fact, which
+ contains a list of clusters."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt search backend."
+ - "For example to search cluster X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all clusters which names start with C(production):
+- ovirt_clusters_facts:
+ pattern: name=production*
+- debug:
+ var: ovirt_clusters
+'''
+
+RETURN = '''
+ovirt_clusters:
+  description: "List of dictionaries describing the clusters. Cluster attributes are mapped to dictionary keys,
+                all cluster attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/cluster."
+ returned: On success.
+ type: list
+'''
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ connection = create_connection(module.params.pop('auth'))
+ clusters_service = connection.system_service().clusters_service()
+ clusters = clusters_service.list(search=module.params['pattern'])
+ module.exit_json(
+ changed=False,
+ ansible_facts=dict(
+ ovirt_clusters=[
+ get_dict_of_struct(c) for c in clusters
+ ],
+ ),
+ )
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_datacenters.py b/lib/ansible/modules/cloud/ovirt/ovirt_datacenters.py
new file mode 100644
index 0000000000..ef63709a5c
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_datacenters.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_params,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_datacenters
+short_description: Module to manage data centers in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage data centers in oVirt"
+options:
+ name:
+ description:
+            - "Name of the data center to manage."
+ required: true
+ state:
+ description:
+ - "Should the data center be present or absent"
+ choices: ['present', 'absent']
+ default: present
+ description:
+ description:
+ - "Description of the data center."
+ comment:
+ description:
+ - "Comment of the data center."
+ local:
+ description:
+ - "I(True) if the data center should be local, I(False) if should be shared."
+ - "Default value is set by engine."
+ compatibility_version:
+ description:
+ - "Compatibility version of the data center."
+ quota_mode:
+ description:
+ - "Quota mode of the data center. One of I(disabled), I(audit) or I(enabled)"
+ choices: ['disabled', 'audit', 'enabled']
+ mac_pool:
+ description:
+ - "MAC pool to be used by this datacenter."
+ - "IMPORTANT: This option is deprecated in oVirt 4.1. You should
+ use C(mac_pool) in C(ovirt_clusters) module, as MAC pools are
+ set per cluster since 4.1."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create datacenter
+- ovirt_datacenters:
+ name: mydatacenter
+ local: True
+ compatibility_version: 4.0
+ quota_mode: enabled
+
+# Remove datacenter
+- ovirt_datacenters:
+ state: absent
+ name: mydatacenter
+'''
+
+RETURN = '''
+id:
+ description: "ID of the managed datacenter"
+ returned: "On success if datacenter is found."
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+data_center:
+ description: "Dictionary of all the datacenter attributes. Datacenter attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/datacenter."
+ returned: "On success if datacenter is found."
+'''
+
+
+class DatacentersModule(BaseModule):
+
+ def __get_major(self, full_version):
+ if full_version is None:
+ return None
+ if isinstance(full_version, otypes.Version):
+ return full_version.major
+ return int(full_version.split('.')[0])
+
+ def __get_minor(self, full_version):
+ if full_version is None:
+ return None
+ if isinstance(full_version, otypes.Version):
+ return full_version.minor
+ return int(full_version.split('.')[1])
+
+ def _get_mac_pool(self):
+ mac_pool = None
+ if self._module.params.get('mac_pool'):
+ mac_pool = search_by_name(
+ self._connection.system_service().mac_pools_service(),
+ self._module.params.get('mac_pool'),
+ )
+
+ return mac_pool
+
+ def build_entity(self):
+ return otypes.DataCenter(
+ name=self._module.params['name'],
+ comment=self._module.params['comment'],
+ description=self._module.params['description'],
+ mac_pool=otypes.MacPool(
+ id=getattr(self._get_mac_pool(), 'id', None),
+ ) if self._module.params.get('mac_pool') else None,
+ quota_mode=otypes.QuotaModeType(
+ self._module.params['quota_mode']
+ ) if self._module.params['quota_mode'] else None,
+ local=self._module.params['local'],
+ version=otypes.Version(
+ major=self.__get_major(self._module.params['compatibility_version']),
+ minor=self.__get_minor(self._module.params['compatibility_version']),
+ ) if self._module.params['compatibility_version'] else None,
+ )
+
+ def update_check(self, entity):
+ minor = self.__get_minor(self._module.params.get('compatibility_version'))
+ major = self.__get_major(self._module.params.get('compatibility_version'))
+ return (
+ equal(getattr(self._get_mac_pool(), 'id', None), getattr(entity.mac_pool, 'id', None)) and
+ equal(self._module.params.get('comment'), entity.comment) and
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self._module.params.get('quota_mode'), str(entity.quota_mode)) and
+ equal(self._module.params.get('local'), entity.local) and
+ equal(minor, self.__get_minor(entity.version)) and
+ equal(major, self.__get_major(entity.version))
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(default=None, required=True),
+ description=dict(default=None),
+ local=dict(type='bool'),
+ compatibility_version=dict(default=None),
+ quota_mode=dict(choices=['disabled', 'audit', 'enabled']),
+ comment=dict(default=None),
+ mac_pool=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ connection = create_connection(module.params.pop('auth'))
+ data_centers_service = connection.system_service().data_centers_service()
+ clusters_module = DatacentersModule(
+ connection=connection,
+ module=module,
+ service=data_centers_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = clusters_module.create()
+ elif state == 'absent':
+ ret = clusters_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_datacenters_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_datacenters_facts.py
new file mode 100644
index 0000000000..6f81295158
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_datacenters_facts.py
@@ -0,0 +1,102 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_datacenters_facts
+short_description: Retrieve facts about one or more oVirt datacenters
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+ - "Retrieve facts about one or more oVirt datacenters."
+notes:
+ - "This module creates a new top-level C(ovirt_datacenters) fact, which
+ contains a list of datacenters."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt search backend."
+ - "For example to search datacenter I(X) use following pattern: I(name=X)"
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all data centers which names start with C(production):
+- ovirt_datacenters_facts:
+ pattern: name=production*
+- debug:
+ var: ovirt_datacenters
+'''
+
+RETURN = '''
+ovirt_datacenters:
+  description: "List of dictionaries describing the datacenters. Datacenter attributes are mapped to dictionary keys,
+                all datacenter attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/data_center."
+ returned: On success.
+ type: list
+'''
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ connection = create_connection(module.params.pop('auth'))
+ datacenters_service = connection.system_service().data_centers_service()
+ datacenters = datacenters_service.list(search=module.params['pattern'])
+ module.exit_json(
+ changed=False,
+ ansible_facts=dict(
+ ovirt_datacenters=[
+ get_dict_of_struct(c) for c in datacenters
+ ],
+ ),
+ )
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_disks.py b/lib/ansible/modules/cloud/ovirt/ovirt_disks.py
new file mode 100644
index 0000000000..7730242afb
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_disks.py
@@ -0,0 +1,322 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+ import ovirtsdk4 as sdk
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.ovirt import *
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_disks
+short_description: "Module to manage Virtual Machine and floating disks in oVirt."
+version_added: "2.2"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage Virtual Machine and floating disks in oVirt."
+options:
+ id:
+ description:
+ - "ID of the disk to manage. Either C(id) or C(name) is required."
+ name:
+ description:
+ - "Name of the disk to manage. Either C(id) or C(name)/C(alias) is required."
+ aliases: ['alias']
+ vm_name:
+ description:
+ - "Name of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)."
+ vm_id:
+ description:
+ - "ID of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)."
+ state:
+ description:
+ - "Should the Virtual Machine disk be present/absent/attached/detached."
+ choices: ['present', 'absent', 'attached', 'detached']
+ default: 'present'
+ size:
+ description:
+ - "Size of the disk. Size should be specified using IEC standard units. For example 10GiB, 1024MiB, etc."
+ interface:
+ description:
+ - "Driver of the storage interface."
+ choices: ['virtio', 'ide', 'virtio_scsi']
+ default: 'virtio'
+ format:
+ description:
+ - Specify format of the disk.
+            - If I(cow) format is used, disk will be created as sparse, so space will be allocated for the volume as needed, also known as I(thin provision).
+            - If I(raw) format is used, disk storage will be allocated right away, also known as I(preallocated).
+ - Note that this option isn't idempotent as it's not currently possible to change format of the disk via API.
+ choices: ['raw', 'cow']
+ storage_domain:
+ description:
+ - "Storage domain name where disk should be created. By default storage is chosen by oVirt engine."
+ profile:
+ description:
+ - "Disk profile name to be attached to disk. By default profile is chosen by oVirt engine."
+ bootable:
+ description:
+ - "I(True) if the disk should be bootable. By default when disk is created it isn't bootable."
+ shareable:
+ description:
+ - "I(True) if the disk should be shareable. By default when disk is created it isn't shareable."
+ logical_unit:
+ description:
+ - "Dictionary which describes LUN to be directly attached to VM:"
+ - "C(address) - Address of the storage server. Used by iSCSI."
+ - "C(port) - Port of the storage server. Used by iSCSI."
+ - "C(target) - iSCSI target."
+ - "C(lun_id) - LUN id."
+ - "C(username) - CHAP Username to be used to access storage server. Used by iSCSI."
+ - "C(password) - CHAP Password of the user to be used to access storage server. Used by iSCSI."
+ - "C(storage_type) - Storage type either I(fcp) or I(iscsi)."
+extends_documentation_fragment: ovirt
+'''
+
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create and attach new disk to VM
+- ovirt_disks:
+ name: myvm_disk
+ vm_name: rhel7
+ size: 10GiB
+ format: cow
+ interface: virtio
+
+# Attach logical unit to VM rhel7
+- ovirt_disks:
+ vm_name: rhel7
+ logical_unit:
+ target: iqn.2016-08-09.brq.str-01:omachace
+ id: 1IET_000d0001
+ address: 10.34.63.204
+ interface: virtio
+
+# Detach disk from VM
+- ovirt_disks:
+ state: detached
+ name: myvm_disk
+ vm_name: rhel7
+ size: 10GiB
+ format: cow
+ interface: virtio
+'''
+
+
+RETURN = '''
+id:
+ description: "ID of the managed disk"
+ returned: "On success if disk is found."
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+disk:
+ description: "Dictionary of all the disk attributes. Disk attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/disk."
+ returned: "On success if disk is found and C(vm_id) or C(vm_name) wasn't passed."
+
+disk_attachment:
+ description: "Dictionary of all the disk attachment attributes. Disk attachment attributes can be found
+ on your oVirt instance at following url:
+ https://ovirt.example.com/ovirt-engine/api/model#types/disk_attachment."
+ returned: "On success if disk is found and C(vm_id) or C(vm_name) was passed and VM was found."
+'''
+
+
+
+def _search_by_lun(disks_service, lun_id):
+ """
+ Find disk by LUN ID.
+ """
+ res = [
+ disk for disk in disks_service.list(search='disk_type=lun') if (
+ disk.lun_storage.id == lun_id
+ )
+ ]
+ return res[0] if res else None
+
+
+class DisksModule(BaseModule):
+
+ def build_entity(self):
+ logical_unit = self._module.params.get('logical_unit')
+ return otypes.Disk(
+ id=self._module.params.get('id'),
+ name=self._module.params.get('name'),
+ description=self._module.params.get('description'),
+ format=otypes.DiskFormat(
+ self._module.params.get('format')
+ ) if self._module.params.get('format') else None,
+ sparse=False if self._module.params.get('format') == 'raw' else True,
+ provisioned_size=convert_to_bytes(
+ self._module.params.get('size')
+ ),
+ storage_domains=[
+ otypes.StorageDomain(
+ name=self._module.params.get('storage_domain'),
+ ),
+ ],
+ shareable=self._module.params.get('shareable'),
+ lun_storage=otypes.HostStorage(
+ type=otypes.StorageType(
+ logical_unit.get('storage_type', 'iscsi')
+ ),
+ logical_units=[
+ otypes.LogicalUnit(
+ address=logical_unit.get('address'),
+ port=logical_unit.get('port', 3260),
+ target=logical_unit.get('target'),
+ id=logical_unit.get('id'),
+ username=logical_unit.get('username'),
+ password=logical_unit.get('password'),
+ )
+ ],
+ ) if logical_unit else None,
+ )
+
+ def update_check(self, entity):
+ return (
+ equal(self._module.params.get('description'), entity.description) and
+ equal(convert_to_bytes(self._module.params.get('size')), entity.provisioned_size) and
+ equal(self._module.params.get('shareable'), entity.shareable)
+ )
+
+
+class DiskAttachmentsModule(DisksModule):
+
+ def build_entity(self):
+ return otypes.DiskAttachment(
+ disk=super(DiskAttachmentsModule, self).build_entity(),
+ interface=otypes.DiskInterface(
+ self._module.params.get('interface')
+ ) if self._module.params.get('interface') else None,
+ bootable=self._module.params.get('bootable'),
+ active=True,
+ )
+
+ def update_check(self, entity):
+ return (
+ equal(self._module.params.get('interface'), str(entity.interface)) and
+ equal(self._module.params.get('bootable'), entity.bootable)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent', 'attached', 'detached'],
+ default='present'
+ ),
+ id=dict(default=None),
+ name=dict(default=None, aliases=['alias']),
+ vm_name=dict(default=None),
+ vm_id=dict(default=None),
+ size=dict(default=None),
+ interface=dict(default=None,),
+ storage_domain=dict(default=None),
+ profile=dict(default=None),
+ format=dict(default=None, choices=['raw', 'cow']),
+ bootable=dict(default=None, type='bool'),
+ shareable=dict(default=None, type='bool'),
+ logical_unit=dict(default=None, type='dict'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ disk = None
+ state = module.params['state']
+ connection = create_connection(module.params.pop('auth'))
+ disks_service = connection.system_service().disks_service()
+ disks_module = DisksModule(
+ connection=connection,
+ module=module,
+ service=disks_service,
+ )
+
+ lun = module.params.get('logical_unit')
+ if lun:
+ disk = _search_by_lun(disks_service, lun.get('id'))
+
+ ret = None
+ # First take care of creating the VM, if needed:
+ if state == 'present' or state == 'detached' or state == 'attached':
+ ret = disks_module.create(
+ entity=disk,
+ result_state=otypes.DiskStatus.OK if lun is None else None,
+ )
+ # We need to pass ID to the module, so in case we want detach/attach disk
+ # we have this ID specified to attach/detach method:
+ module.params['id'] = ret['id'] if disk is None else disk.id
+ elif state == 'absent':
+ ret = disks_module.remove()
+
+ # If VM was passed attach/detach disks to/from the VM:
+ if module.params.get('vm_id') is not None or module.params.get('vm_name') is not None and state != 'absent':
+ vms_service = connection.system_service().vms_service()
+
+ # If `vm_id` isn't specified, find VM by name:
+ vm_id = module.params['vm_id']
+ if vm_id is None:
+ vm_id = getattr(search_by_name(vms_service, module.params['vm_name']), 'id', None)
+
+ if vm_id is None:
+ module.fail_json(
+ msg="VM don't exists, please create it first."
+ )
+
+ disk_attachments_service = vms_service.vm_service(vm_id).disk_attachments_service()
+ disk_attachments_module = DiskAttachmentsModule(
+ connection=connection,
+ module=module,
+ service=disk_attachments_service,
+ changed=ret['changed'] if ret else False,
+ )
+
+ if state == 'present' or state == 'attached':
+ ret = disk_attachments_module.create()
+ elif state == 'detached':
+ ret = disk_attachments_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ finally:
+ connection.close(logout=False)
+
+
+from ansible.module_utils.basic import *
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_external_providers.py b/lib/ansible/modules/cloud/ovirt/ovirt_external_providers.py
new file mode 100644
index 0000000000..9bcb38a78f
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_external_providers.py
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ BaseModule,
+ check_params,
+ check_sdk,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_external_providers
+short_description: Module to manage external providers in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage external providers in oVirt"
+options:
+ name:
+ description:
+ - "Name of the external provider to manage."
+ state:
+ description:
+ - "Should the external provider be present or absent."
+ choices: ['present', 'absent']
+ default: present
+ description:
+ description:
+ - "Description of the external provider."
+ type:
+ description:
+ - "Type of the external provider."
+ choices: ['os_image', 'os_network', 'os_volume', 'foreman']
+ url:
+ description:
+ - "URL where external provider is hosted."
+ - "Applicable for those types: I(os_image), I(os_volume), I(os_network) and I(foreman)."
+ username:
+ description:
+ - "Username to be used for login to external provider."
+ - "Applicable for all types."
+ password:
+ description:
+ - "Password of the user specified in C(username) parameter."
+ - "Applicable for all types."
+ tenant_name:
+ description:
+ - "Name of the tenant."
+ - "Applicable for those types: I(os_image), I(os_volume) and I(os_network)."
+ aliases: ['tenant']
+ authentication_url:
+ description:
+ - "Keystone authentication URL of the openstack provider."
+ - "Applicable for those types: I(os_image), I(os_volume) and I(os_network)."
+ aliases: ['auth_url']
+ data_center:
+ description:
+ - "Name of the data center where provider should be attached."
+ - "Applicable for those type: I(os_volume)."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add image external provider:
+- ovirt_external_providers:
+ name: image_provider
+ type: os_image
+ url: http://10.34.63.71:9292
+ username: admin
+ password: 123456
+ tenant: admin
+ auth_url: http://10.34.63.71:35357/v2.0/
+
+# Add foreman provider:
+- ovirt_external_providers:
+ name: foreman_provider
+ type: foreman
+ url: https://foreman.example.com
+ username: admin
+ password: 123456
+
+# Remove image external provider:
+- ovirt_external_providers:
+ state: absent
+ name: image_provider
+ type: os_image
+'''
+
+RETURN = '''
+id:
+ description: ID of the external provider which is managed
+ returned: On success if external provider is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+external_host_provider:
+ description: "Dictionary of all the external_host_provider attributes. External provider attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/external_host_provider."
+ returned: "On success and if parameter 'type: foreman' is used."
+ type: dictionary
+openstack_image_provider:
+ description: "Dictionary of all the openstack_image_provider attributes. External provider attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_image_provider."
+ returned: "On success and if parameter 'type: os_image' is used."
+ type: dictionary
+openstack_volume_provider:
+ description: "Dictionary of all the openstack_volume_provider attributes. External provider attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_volume_provider."
+ returned: "On success and if parameter 'type: os_volume' is used."
+ type: dictionary
+openstack_network_provider:
+ description: "Dictionary of all the openstack_network_provider attributes. External provider attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_network_provider."
+ returned: "On success and if parameter 'type: os_network' is used."
+ type: dictionary
+'''
+
+
class ExternalProviderModule(BaseModule):
    """Create/update logic for oVirt external providers.

    The concrete SDK type (e.g. otypes.OpenStackImageProvider) is injected
    via provider_type() before create()/remove() is called.
    """

    def provider_type(self, provider_type):
        # Remember the otypes class used by build_entity to construct the
        # provider of the requested kind.
        self._provider_type = provider_type

    def build_entity(self):
        """Build the SDK provider entity from the module parameters.

        Only attributes that actually exist on the provider type are
        copied, since different provider types support different fields.
        """
        provider_type = self._provider_type(
            # Fix: authentication is required only when a username was
            # actually supplied. The 'username' key is always present in
            # module.params (declared in the argument spec with default
            # None), so the former membership test was always True.
            requires_authentication=self._module.params.get('username') is not None,
        )
        for key, value in self._module.params.items():
            if hasattr(provider_type, key):
                setattr(provider_type, key, value)

        return provider_type

    def update_check(self, entity):
        """Return True when the existing provider matches the parameters."""
        return (
            equal(self._module.params.get('description'), entity.description) and
            equal(self._module.params.get('url'), entity.url) and
            equal(self._module.params.get('authentication_url'), entity.authentication_url) and
            # Not every provider type has a tenant, hence the getattr:
            equal(self._module.params.get('tenant_name'), getattr(entity, 'tenant_name', None)) and
            equal(self._module.params.get('username'), entity.username)
        )
+
+
def _external_provider_service(provider_type, system_service):
    """Resolve *provider_type* to its SDK class and providers service.

    Returns a (type, service) pair, or None for an unknown type — the
    argument spec restricts the choices, so that should not happen.
    """
    dispatch = {
        'os_image': (
            otypes.OpenStackImageProvider,
            system_service.openstack_image_providers_service,
        ),
        'os_network': (
            otypes.OpenStackNetworkProvider,
            system_service.openstack_network_providers_service,
        ),
        'os_volume': (
            otypes.OpenStackVolumeProvider,
            system_service.openstack_volume_providers_service,
        ),
        'foreman': (
            otypes.ExternalHostProvider,
            system_service.external_host_providers_service,
        ),
    }
    entry = dispatch.get(provider_type)
    if entry is None:
        return None
    sdk_type, service_getter = entry
    return sdk_type, service_getter()
+
+
def main():
    """Entry point: add, update or remove an oVirt external provider."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(default=None),
        description=dict(default=None),
        type=dict(
            default=None,
            required=True,
            choices=[
                'os_image', 'os_network', 'os_volume', 'foreman',
            ],
            aliases=['provider'],
        ),
        url=dict(default=None),
        username=dict(default=None),
        password=dict(default=None, no_log=True),
        tenant_name=dict(default=None, aliases=['tenant']),
        authentication_url=dict(default=None, aliases=['auth_url']),
        # Fix: the previous alias 'data_center' was identical to the option
        # name itself, which is a no-op; dropped.
        data_center=dict(default=None),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)
    check_params(module)

    # Fix: initialize before the try block so the finally clause cannot
    # raise NameError when create_connection() itself fails.
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        provider_type, external_providers_service = _external_provider_service(
            provider_type=module.params.pop('type'),
            system_service=connection.system_service(),
        )
        external_providers_module = ExternalProviderModule(
            connection=connection,
            module=module,
            service=external_providers_service,
        )
        external_providers_module.provider_type(provider_type)

        state = module.params.pop('state')
        if state == 'absent':
            ret = external_providers_module.remove()
        elif state == 'present':
            ret = external_providers_module.create()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        if connection is not None:
            connection.close(logout=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_external_providers_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_external_providers_facts.py
new file mode 100644
index 0000000000..b67ec4d89d
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_external_providers_facts.py
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_external_providers_facts
+short_description: Retrieve facts about one or more oVirt external_providers
+version_added: "2.3"
+description:
+ - "Retrieve facts about one or more oVirt external_providers."
+notes:
+ - "This module creates a new top-level C(ovirt_external_providers) fact, which
+ contains a list of external_providers."
+options:
+ type:
+ description:
+ - "Type of the external provider."
+ choices: ['os_image', 'os_network', 'os_volume', 'foreman']
+ required: true
+ name:
+ description:
+ - "Name of the external provider, can be used as glob expression."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all image external providers named C<glance>:
+- ovirt_external_providers_facts:
+ type: os_image
+ name: glance
+- debug:
+ var: ovirt_external_providers
+'''
+
+RETURN = '''
+external_host_providers:
+ description: "List of dictionaries of all the external_host_provider attributes. External provider attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/external_host_provider."
+ returned: "On success and if parameter 'type: foreman' is used."
+ type: list
+openstack_image_providers:
+ description: "List of dictionaries of all the openstack_image_provider attributes. External provider attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_image_provider."
+ returned: "On success and if parameter 'type: os_image' is used."
+ type: list
+openstack_volume_providers:
+ description: "List of dictionaries of all the openstack_volume_provider attributes. External provider attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_volume_provider."
+ returned: "On success and if parameter 'type: os_volume' is used."
+ type: list
+openstack_network_providers:
+ description: "List of dictionaries of all the openstack_network_provider attributes. External provider attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/openstack_network_provider."
+ returned: "On success and if parameter 'type: os_network' is used."
+ type: list
+'''
+
+
+def _external_provider_service(provider_type, system_service):
+ if provider_type == 'os_image':
+ return system_service.openstack_image_providers_service()
+ elif provider_type == 'os_network':
+ return system_service.openstack_network_providers_service()
+ elif provider_type == 'os_volume':
+ return system_service.openstack_volume_providers_service()
+ elif provider_type == 'foreman':
+ return system_service.external_host_providers_service()
+
+
def main():
    """Entry point: gather facts about oVirt external providers."""
    argument_spec = ovirt_full_argument_spec(
        name=dict(default=None, required=False),
        type=dict(
            default=None,
            required=True,
            choices=[
                'os_image', 'os_network', 'os_volume', 'foreman',
            ],
            aliases=['provider'],
        ),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    # Fix: initialize before the try block so the finally clause cannot
    # raise NameError when create_connection() itself fails.
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        external_providers_service = _external_provider_service(
            provider_type=module.params.pop('type'),
            system_service=connection.system_service(),
        )
        # 'name' may be a glob expression; filtering happens client-side,
        # as the providers services expose no server-side search:
        if module.params['name']:
            external_providers = [
                e for e in external_providers_service.list()
                if fnmatch.fnmatch(e.name, module.params['name'])
            ]
        else:
            external_providers = external_providers_service.list()

        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_external_providers=[
                    get_dict_of_struct(c) for c in external_providers
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        if connection is not None:
            connection.close(logout=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_groups.py b/lib/ansible/modules/cloud/ovirt/ovirt_groups.py
new file mode 100644
index 0000000000..34f326e64b
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_groups.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_params,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_groups
+short_description: Module to manage groups in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage groups in oVirt"
+options:
+ name:
+ description:
+ - "Name of the group to manage."
+ required: true
+ state:
+ description:
+ - "Should the group be present/absent."
+ choices: ['present', 'absent']
+ default: present
+ authz_name:
+ description:
+ - "Authorization provider of the group. In previous versions of oVirt known as domain."
+ required: true
+ aliases: ['domain']
+ namespace:
+ description:
+ - "Namespace of the authorization provider, where group resides."
+ required: false
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add group group1 from authorization provider example.com-authz
+ovirt_groups:
+ name: group1
+ domain: example.com-authz
+
+# Add group group1 from authorization provider example.com-authz
+# In case of multi-domain Active Directory setup, you should pass
+# also namespace, so it adds correct group:
+ovirt_groups:
+ name: group1
+ namespace: dc=ad2,dc=example,dc=com
+ domain: example.com-authz
+
+# Remove group group1 with authorization provider example.com-authz
+ovirt_groups:
+ state: absent
+ name: group1
+ domain: example.com-authz
+'''
+
+RETURN = '''
+id:
+ description: ID of the group which is managed
+ returned: On success if group is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+group:
+ description: "Dictionary of all the group attributes. Group attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/group."
+ returned: On success if group is found.
+'''
+
+
+def _group(connection, module):
+ groups = connection.system_service().groups_service().list(
+ search="name={name}".format(
+ name=module.params['name'],
+ )
+ )
+
+ # If found more groups, filter them by namespace and authz name:
+ # (filtering here, as oVirt backend doesn't support it)
+ if len(groups) > 1:
+ groups = [
+ g for g in groups if (
+ equal(module.params['namespace'], g.namespace) and
+ equal(module.params['authz_name'], g.domain.name)
+ )
+ ]
+ return groups[0] if groups else None
+
+
class GroupsModule(BaseModule):
    """BaseModule subclass that knows how to build an oVirt Group entity."""

    def build_entity(self):
        params = self._module.params
        return otypes.Group(
            name=params['name'],
            namespace=params['namespace'],
            domain=otypes.Domain(
                name=params['authz_name']
            ),
        )
+ )
+
+
def main():
    """Entry point: add or remove an oVirt group."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(required=True),
        authz_name=dict(required=True, aliases=['domain']),
        namespace=dict(default=None),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)
    check_params(module)

    # Fix: initialize before the try block so the finally clause cannot
    # raise NameError when create_connection() itself fails.
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        groups_service = connection.system_service().groups_service()
        groups_module = GroupsModule(
            connection=connection,
            module=module,
            service=groups_service,
        )
        # Resolve the existing group (if any) up front, as the generic
        # search cannot disambiguate by namespace/authz name:
        group = _group(connection, module)
        state = module.params['state']
        if state == 'present':
            ret = groups_module.create(entity=group)
        elif state == 'absent':
            ret = groups_module.remove(entity=group)

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        if connection is not None:
            connection.close(logout=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_groups_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_groups_facts.py
new file mode 100644
index 0000000000..ab4252ffc9
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_groups_facts.py
@@ -0,0 +1,102 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_groups_facts
+short_description: Retrieve facts about one or more oVirt groups
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+ - "Retrieve facts about one or more oVirt groups."
+notes:
+ - "This module creates a new top-level C(ovirt_groups) fact, which
+ contains a list of groups."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt search backend."
+ - "For example to search group X use following pattern: name=X"
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all groups which names start with C(admin):
+- ovirt_groups_facts:
+ pattern: name=admin*
+- debug:
+ var: ovirt_groups
+'''
+
+RETURN = '''
+ovirt_groups:
+ description: "List of dictionaries describing the groups. Group attributes are mapped to dictionary keys,
+ all groups attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/group."
+ returned: On success.
+ type: list
+'''
+
+
def main():
    """Entry point: gather facts about oVirt groups matching a pattern."""
    argument_spec = ovirt_full_argument_spec(
        pattern=dict(default='', required=False),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    # Fix: initialize before the try block so the finally clause cannot
    # raise NameError when create_connection() itself fails.
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        groups_service = connection.system_service().groups_service()
        # 'pattern' is passed straight to the oVirt search backend:
        groups = groups_service.list(search=module.params['pattern'])
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_groups=[
                    get_dict_of_struct(c) for c in groups
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        if connection is not None:
            connection.close(logout=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_host_networks.py b/lib/ansible/modules/cloud/ovirt/ovirt_host_networks.py
new file mode 100644
index 0000000000..edf6d3c378
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_host_networks.py
@@ -0,0 +1,368 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+ import ovirtsdk4 as sdk
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.ovirt import *
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_networks
+short_description: Module to manage host networks in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage host networks in oVirt."
+options:
+ name:
+ description:
+ - "Name of the host to manage networks for."
+ required: true
+ state:
+ description:
+ - "Should the host be present/absent."
+ choices: ['present', 'absent']
+ default: present
+ bond:
+ description:
+ - "Dictionary describing network bond:"
+ - "C(name) - Bond name."
+ - "C(mode) - Bonding mode."
+ - "C(interfaces) - List of interfaces to create a bond."
+ interface:
+ description:
+ - "Name of the network interface where logical network should be attached."
+ networks:
+ description:
+ - "List of dictionary describing networks to be attached to interface or bond:"
+ - "C(name) - Name of the logical network to be assigned to bond or interface."
+ - "C(boot_protocol) - Boot protocol one of the I(none), I(static) or I(dhcp)."
+ - "C(address) - IP address in case of I(static) boot protocol is used."
+ - "C(prefix) - Routing prefix in case of I(static) boot protocol is used."
+ - "C(gateway) - Gateway in case of I(static) boot protocol is used."
+ - "C(version) - IP version. Either v4 or v6."
+ labels:
+ description:
+ - "List of names of the network label to be assigned to bond or interface."
+ check:
+ description:
+ - "If I(true) verify connectivity between host and engine."
+ - "Network configuration changes will be rolled back if connectivity between
+ engine and the host is lost after changing network configuration."
+ save:
+ description:
+ - "If I(true) network configuration will be persistent, by default they are temporary."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create bond on eth0 and eth1 interface, and put 'myvlan' network on top of it:
+- name: Bonds
+ ovirt_host_networks:
+ name: myhost
+ bond:
+ name: bond0
+ mode: 2
+ interfaces:
+ - eth1
+ - eth2
+ networks:
+ - name: myvlan
+ boot_protocol: static
+ address: 1.2.3.4
+ prefix: 24
+ gateway: 1.2.3.4
+ version: v4
+
+# Remove bond0 bond from host interfaces:
+- ovirt_host_networks:
+ state: absent
+ name: myhost
+ bond:
+ name: bond0
+
+# Assign myvlan1 and myvlan2 vlans to host eth0 interface:
+- ovirt_host_networks:
+ name: myhost
+ interface: eth0
+ networks:
+ - name: myvlan1
+ - name: myvlan2
+
+# Remove myvlan2 vlan from host eth0 interface:
+- ovirt_host_networks:
+ state: absent
+ name: myhost
+ interface: eth0
+ networks:
+ - name: myvlan2
+
+# Remove all networks/vlans from host eth0 interface:
+- ovirt_host_networks:
+ state: absent
+ name: myhost
+ interface: eth0
+'''
+
+RETURN = '''
+id:
+ description: ID of the host NIC which is managed
+ returned: On success if host NIC is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+host_nic:
+ description: "Dictionary of all the host NIC attributes. Host NIC attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/host_nic."
+ returned: On success if host NIC is found.
+'''
+
+
class HostNetworksModule(BaseModule):
    """Module logic for the oVirt host 'setup_networks' action."""

    def build_entity(self):
        # setup_networks is invoked through action(); the entity itself
        # carries no attributes, so an empty Host suffices.
        return otypes.Host()

    def update_address(self, attachments_service, attachment, network):
        """Sync the IP assignment of *attachment* with the requested *network*.

        Only the assignment whose IP version matches C(network['version'])
        is considered; the first match is updated and the loop stops.

        Fix: the attachments service is now passed in explicitly -- the
        original body referenced an undefined ``attachments_service`` name
        and raised NameError whenever an address update was required.
        """
        for ip in attachment.ip_address_assignments:
            if str(ip.ip.version) == network.get('version'):
                changed = False
                if not equal(network.get('boot_protocol'), str(ip.assignment_method)):
                    ip.assignment_method = otypes.BootProtocol(network.get('boot_protocol'))
                    changed = True
                if not equal(network.get('address'), ip.ip.address):
                    ip.ip.address = network.get('address')
                    changed = True
                if not equal(network.get('gateway'), ip.ip.gateway):
                    ip.ip.gateway = network.get('gateway')
                    changed = True
                # NOTE(review): assumes ip.ip.netmask holds a numeric prefix
                # (int() would raise on a dotted-quad netmask) -- confirm.
                if not equal(network.get('prefix'), int(ip.ip.netmask)):
                    ip.ip.netmask = str(network.get('prefix'))
                    changed = True

                if changed:
                    attachments_service.service(attachment.id).update(attachment)
                    self.changed = True
                break

    def has_update(self, nic_service):
        """Return True when setup_networks must run for this NIC.

        Address-only differences are fixed in place by update_address and
        do not by themselves force a setup_networks run.
        """
        update = False
        bond = self._module.params['bond']
        networks = self._module.params['networks']
        nic = nic_service.get()

        if nic is None:
            return update

        # Check if bond configuration should be updated:
        if bond:
            update = not (
                equal(str(bond.get('mode')), nic.bonding.options[0].value) and
                equal(
                    sorted(bond.get('interfaces')) if bond.get('interfaces') else None,
                    sorted(get_link_name(self._connection, s) for s in nic.bonding.slaves)
                )
            )

        if not networks:
            return update

        # Check if networks attachments configuration should be updated:
        attachments_service = nic_service.network_attachments_service()
        network_names = [network.get('name') for network in networks]

        attachments = {}
        for attachment in attachments_service.list():
            name = get_link_name(self._connection, attachment.network)
            if name in network_names:
                attachments[name] = attachment

        for network in networks:
            attachment = attachments.get(network.get('name'))
            # If the attachment doesn't exist, we need to create it:
            if attachment is None:
                return True

            self.update_address(attachments_service, attachment, network)

        return update

    def _action_save_configuration(self, entity):
        # Persist the (otherwise temporary) network configuration when the
        # 'save' parameter is set.
        if self._module.params['save']:
            if not self._module.check_mode:
                self._service.service(entity.id).commit_net_config()
            self.changed = True
+
+
def main():
    """Entry point: attach/detach networks, bonds and labels on a host NIC."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(default=None, aliases=['host'], required=True),
        bond=dict(default=None, type='dict'),
        interface=dict(default=None),
        networks=dict(default=None, type='list'),
        labels=dict(default=None, type='list'),
        check=dict(default=None, type='bool'),
        save=dict(default=None, type='bool'),
    )
    module = AnsibleModule(argument_spec=argument_spec)
    check_sdk(module)

    # Fix: initialize before the try block so the finally clause cannot
    # raise NameError when create_connection() itself fails.
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        hosts_service = connection.system_service().hosts_service()
        host_networks_module = HostNetworksModule(
            connection=connection,
            module=module,
            service=hosts_service,
        )

        host = host_networks_module.search_entity()
        if host is None:
            raise Exception("Host '%s' was not found." % module.params['name'])

        bond = module.params['bond']
        interface = module.params['interface']
        networks = module.params['networks']
        labels = module.params['labels']
        # The NIC to operate on is the bond if one was requested,
        # otherwise the plain interface:
        nic_name = bond.get('name') if bond else module.params['interface']

        nics_service = hosts_service.host_service(host.id).nics_service()
        nic = search_by_name(nics_service, nic_name)

        state = module.params['state']
        if (
            state == 'present' and
            (nic is None or host_networks_module.has_update(nics_service.service(nic.id)))
        ):
            host_networks_module.action(
                entity=host,
                action='setup_networks',
                post_action=host_networks_module._action_save_configuration,
                check_connectivity=module.params['check'],
                modified_bonds=[
                    otypes.HostNic(
                        name=bond.get('name'),
                        bonding=otypes.Bonding(
                            options=[
                                otypes.Option(
                                    name="mode",
                                    value=str(bond.get('mode')),
                                )
                            ],
                            slaves=[
                                otypes.HostNic(name=i) for i in bond.get('interfaces', [])
                            ],
                        ),
                    ),
                ] if bond else None,
                modified_labels=[
                    otypes.NetworkLabel(
                        name=str(name),
                        host_nic=otypes.HostNic(
                            name=bond.get('name') if bond else interface
                        ),
                    ) for name in labels
                ] if labels else None,
                modified_network_attachments=[
                    otypes.NetworkAttachment(
                        network=otypes.Network(
                            name=network['name']
                        ) if network['name'] else None,
                        host_nic=otypes.HostNic(
                            name=bond.get('name') if bond else interface
                        ),
                        ip_address_assignments=[
                            otypes.IpAddressAssignment(
                                assignment_method=otypes.BootProtocol(
                                    network.get('boot_protocol', 'none')
                                ),
                                ip=otypes.Ip(
                                    address=network.get('address'),
                                    gateway=network.get('gateway'),
                                    netmask=network.get('netmask'),
                                    version=otypes.IpVersion(
                                        network.get('version')
                                    ) if network.get('version') else None,
                                ),
                            ),
                        ],
                    ) for network in networks
                ] if networks else None,
            )
        elif state == 'absent' and nic:
            attachments_service = nics_service.nic_service(nic.id).network_attachments_service()
            attachments = attachments_service.list()
            if networks:
                # Remove only the attachments of the requested networks:
                network_names = [network['name'] for network in networks]
                attachments = [
                    attachment for attachment in attachments
                    if get_link_name(connection, attachment.network) in network_names
                ]
            if labels or bond or attachments:
                host_networks_module.action(
                    entity=host,
                    action='setup_networks',
                    post_action=host_networks_module._action_save_configuration,
                    check_connectivity=module.params['check'],
                    removed_bonds=[
                        otypes.HostNic(
                            name=bond.get('name'),
                        ),
                    ] if bond else None,
                    removed_labels=[
                        otypes.NetworkLabel(
                            name=str(name),
                        ) for name in labels
                    ] if labels else None,
                    removed_network_attachments=list(attachments),
                )

        # Re-fetch the NIC so the returned facts reflect the final state:
        nic = search_by_name(nics_service, nic_name)
        module.exit_json(**{
            'changed': host_networks_module.changed,
            'id': nic.id if nic else None,
            'host_nic': get_dict_of_struct(nic),
        })
    except Exception as e:
        module.fail_json(msg=str(e))
    finally:
        if connection is not None:
            connection.close(logout=False)
+
+from ansible.module_utils.basic import *
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_host_pm.py b/lib/ansible/modules/cloud/ovirt/ovirt_host_pm.py
new file mode 100644
index 0000000000..41475ad7bb
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_host_pm.py
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+ import ovirtsdk4 as sdk
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.ovirt import *
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_host_pm
+short_description: Module to manage power management of hosts in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage power management of hosts in oVirt."
+options:
+ name:
+ description:
+            - "Name of the host to manage."
+ required: true
+ aliases: ['host']
+ state:
+ description:
+ - "Should the host be present/absent."
+ choices: ['present', 'absent']
+ default: present
+ address:
+ description:
+ - "Address of the power management interface."
+ username:
+ description:
+ - "Username to be used to connect to power management interface."
+ password:
+ description:
+ - "Password of the user specified in C(username) parameter."
+ type:
+ description:
+ - "Type of the power management. oVirt predefined values are I(drac5), I(ipmilan), I(rsa),
+ I(bladecenter), I(alom), I(apc), I(apc_snmp), I(eps), I(wti), I(rsb), I(cisco_ucs),
+ I(drac7), I(hpblade), I(ilo), I(ilo2), I(ilo3), I(ilo4), I(ilo_ssh),
+ but user can have defined custom type."
+ port:
+ description:
+ - "Power management interface port."
+ slot:
+ description:
+ - "Power management slot."
+ options:
+ description:
+ - "Dictionary of additional fence agent options."
+ - "Additional information about options can be found at U(https://fedorahosted.org/cluster/wiki/FenceArguments)."
+ encrypt_options:
+ description:
+            - "If I(true), options will be encrypted when sent to agent."
+ aliases: ['encrypt']
+ order:
+ description:
+            - "Integer value specifying the order of the fence agent; by default it's added at the end."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add fence agent to host 'myhost'
+- ovirt_host_pm:
+ name: myhost
+ address: 1.2.3.4
+ options:
+ myoption1: x
+ myoption2: y
+ username: admin
+ password: admin
+ port: 3333
+ type: ipmilan
+
+# Remove ipmilan fence agent with address 1.2.3.4 on host 'myhost'
+- ovirt_host_pm:
+ state: absent
+ name: myhost
+ address: 1.2.3.4
+ type: ipmilan
+'''
+
+RETURN = '''
+id:
+ description: ID of the agent which is managed
+ returned: On success if agent is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+agent:
+ description: "Dictionary of all the agent attributes. Agent attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/agent."
+ returned: On success if agent is found.
+'''
+
+
+class HostModule(BaseModule):
+ def build_entity(self):
+ return otypes.Host(
+ power_management=otypes.PowerManagement(
+ enabled=True,
+ ),
+ )
+
+ def update_check(self, entity):
+ return equal(True, entity.power_management.enabled)
+
+
+class HostPmModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.Agent(
+ address=self._module.params['address'],
+ encrypt_options=self._module.params['encrypt_options'],
+ options=[
+ otypes.Option(
+ name=name,
+ value=value,
+ ) for name, value in self._module.params['options'].items()
+ ] if self._module.params['options'] else None,
+ password=self._module.params['password'],
+ port=self._module.params['port'],
+ type=self._module.params['type'],
+ username=self._module.params['username'],
+ order=self._module.params.get('order', 100),
+ )
+
+ def update_check(self, entity):
+ return (
+ equal(self._module.params.get('address'), entity.address) and
+ equal(self._module.params.get('encrypt_options'), entity.encrypt_options) and
+ equal(self._module.params.get('password'), entity.password) and
+ equal(self._module.params.get('username'), entity.username) and
+ equal(self._module.params.get('port'), entity.port) and
+ equal(self._module.params.get('type'), entity.type)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(default=None, required=True, aliases=['host']),
+ address=dict(default=None),
+ username=dict(default=None),
+ password=dict(default=None),
+ type=dict(default=None),
+ port=dict(default=None, type='int'),
+ slot=dict(default=None),
+ options=dict(default=None, type='dict'),
+ encrypt_options=dict(default=None, type='bool', aliases=['encrypt']),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+
+ try:
+ connection = create_connection(module.params.pop('auth'))
+ hosts_service = connection.system_service().hosts_service()
+ host = search_by_name(hosts_service, module.params['name'])
+ fence_agents_service = hosts_service.host_service(host.id).fence_agents_service()
+
+ host_pm_module = HostPmModule(
+ connection=connection,
+ module=module,
+ service=fence_agents_service,
+ )
+ host_module = HostModule(
+ connection=connection,
+ module=module,
+ service=hosts_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ agent = host_pm_module.search_entity(
+ search_params={
+ 'address': module.params['address'],
+ 'type': module.params['type'],
+ }
+ )
+ ret = host_pm_module.create(entity=agent)
+
+ # Enable Power Management, if it's not enabled:
+ host_module.create(entity=host)
+ elif state == 'absent':
+ agent = host_pm_module.search_entity(
+ search_params={
+ 'address': module.params['address'],
+ 'type': module.params['type'],
+ }
+ )
+ ret = host_pm_module.remove(entity=agent)
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ finally:
+ connection.close(logout=False)
+
+from ansible.module_utils.basic import *
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_hosts.py b/lib/ansible/modules/cloud/ovirt/ovirt_hosts.py
new file mode 100644
index 0000000000..1394692f8c
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_hosts.py
@@ -0,0 +1,326 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+ import ovirtsdk4 as sdk
+ import ovirtsdk4.types as otypes
+
+ from ovirtsdk4.types import HostStatus as hoststate
+except ImportError:
+ pass
+
+from ansible.module_utils.ovirt import *
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_hosts
+short_description: Module to manage hosts in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage hosts in oVirt"
+options:
+ name:
+ description:
+            - "Name of the host to manage."
+ required: true
+ state:
+ description:
+            - "State which the host should be in after successful completion."
+ choices: ['present', 'absent', 'maintenance', 'upgraded', 'started', 'restarted', 'stopped']
+ default: present
+ comment:
+ description:
+ - "Description of the host."
+ cluster:
+ description:
+ - "Name of the cluster, where host should be created."
+ address:
+ description:
+ - "Host address. It can be either FQDN (preferred) or IP address."
+ password:
+ description:
+ - "Password of the root. It's required in case C(public_key) is set to I(False)."
+ public_key:
+ description:
+ - "I(True) if the public key should be used to authenticate to host."
+ - "It's required in case C(password) is not set."
+ default: False
+ aliases: ['ssh_public_key']
+ kdump_integration:
+ description:
+ - "Specify if host will have enabled Kdump integration."
+ choices: ['enabled', 'disabled']
+ default: enabled
+ spm_priority:
+ description:
+ - "SPM priority of the host. Integer value from 1 to 10, where higher number means higher priority."
+ override_iptables:
+ description:
+ - "If True host iptables will be overridden by host deploy script."
+ force:
+ description:
+ - "If True host will be forcibly moved to desired state."
+ default: False
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add host with username/password
+- ovirt_hosts:
+ cluster: Default
+ name: myhost
+ address: 10.34.61.145
+ password: secret
+
+# Add host using public key
+- ovirt_hosts:
+ public_key: true
+ cluster: Default
+ name: myhost2
+ address: 10.34.61.145
+
+# Maintenance
+- ovirt_hosts:
+ state: maintenance
+ name: myhost
+
+# Restart host using power management:
+- ovirt_hosts:
+ state: restarted
+ name: myhost
+
+# Upgrade host
+- ovirt_hosts:
+ state: upgraded
+ name: myhost
+
+# Remove host
+- ovirt_hosts:
+ state: absent
+ name: myhost
+ force: True
+'''
+
+RETURN = '''
+id:
+ description: ID of the host which is managed
+ returned: On success if host is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+host:
+ description: "Dictionary of all the host attributes. Host attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/host."
+ returned: On success if host is found.
+'''
+
+
+class HostsModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.Host(
+ name=self._module.params['name'],
+ cluster=otypes.Cluster(
+ name=self._module.params['cluster']
+ ) if self._module.params['cluster'] else None,
+ comment=self._module.params['comment'],
+ address=self._module.params['address'],
+ root_password=self._module.params['password'],
+ ssh=otypes.Ssh(
+ authentication_method='publickey',
+ ) if self._module.params['public_key'] else None,
+ kdump_status=otypes.KdumpStatus(
+ self._module.params['kdump_integration']
+ ) if self._module.params['kdump_integration'] else None,
+ spm=otypes.Spm(
+ priority=self._module.params['spm_priority'],
+ ) if self._module.params['spm_priority'] else None,
+ override_iptables=self._module.params['override_iptables'],
+ )
+
+ def update_check(self, entity):
+ return (
+ equal(self._module.params.get('comment'), entity.comment) and
+ equal(self._module.params.get('kdump_integration'), entity.kdump_status) and
+ equal(self._module.params.get('spm_priority'), entity.spm.priority)
+ )
+
+ def pre_remove(self, entity):
+ self.action(
+ entity=entity,
+ action='deactivate',
+ action_condition=lambda h: h.status != hoststate.MAINTENANCE,
+ wait_condition=lambda h: h.status == hoststate.MAINTENANCE,
+ )
+
+ def post_update(self, entity):
+ if entity.status != hoststate.UP:
+ if not self._module.check_mode:
+ self._service.host_service(entity.id).activate()
+ self.changed = True
+
+
+def failed_state(host):
+ return host.status in [
+ hoststate.ERROR,
+ hoststate.INSTALL_FAILED,
+ hoststate.NON_RESPONSIVE,
+ hoststate.NON_OPERATIONAL,
+ ]
+
+
+def control_state(host_module):
+ host = host_module.search_entity()
+ if host is None:
+ return
+
+ state = host_module._module.params['state']
+ host_service = host_module._service.service(host.id)
+ if failed_state(host):
+ raise Exception("Not possible to manage host '%s'." % host.name)
+ elif host.status in [
+ hoststate.REBOOT,
+ hoststate.CONNECTING,
+ hoststate.INITIALIZING,
+ hoststate.INSTALLING,
+ hoststate.INSTALLING_OS,
+ ]:
+ wait(
+ service=host_service,
+ condition=lambda host: host.status == hoststate.UP,
+ fail_condition=failed_state,
+ )
+ elif host.status == hoststate.PREPARING_FOR_MAINTENANCE:
+ wait(
+ service=host_service,
+ condition=lambda host: host.status == hoststate.MAINTENANCE,
+ fail_condition=failed_state,
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent', 'maintenance', 'upgraded', 'started', 'restarted', 'stopped'],
+ default='present',
+ ),
+ name=dict(required=True),
+ comment=dict(default=None),
+ cluster=dict(default=None),
+ address=dict(default=None),
+ password=dict(default=None),
+ public_key=dict(default=False, type='bool', aliases=['ssh_public_key']),
+ kdump_integration=dict(default=None, choices=['enabled', 'disabled']),
+ spm_priority=dict(default=None, type='int'),
+ override_iptables=dict(default=None, type='bool'),
+ force=dict(default=False, type='bool'),
+ timeout=dict(default=600, type='int'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+
+ try:
+ connection = create_connection(module.params.pop('auth'))
+ hosts_service = connection.system_service().hosts_service()
+ hosts_module = HostsModule(
+ connection=connection,
+ module=module,
+ service=hosts_service,
+ )
+
+ state = module.params['state']
+ control_state(hosts_module)
+ if state == 'present':
+ ret = hosts_module.create()
+ hosts_module.action(
+ action='activate',
+ action_condition=lambda h: h.status == hoststate.MAINTENANCE,
+ wait_condition=lambda h: h.status == hoststate.UP,
+ fail_condition=failed_state,
+ )
+ elif state == 'absent':
+ ret = hosts_module.remove()
+ elif state == 'maintenance':
+ ret = hosts_module.action(
+ action='deactivate',
+ action_condition=lambda h: h.status != hoststate.MAINTENANCE,
+ wait_condition=lambda h: h.status == hoststate.MAINTENANCE,
+ fail_condition=failed_state,
+ )
+ elif state == 'upgraded':
+ ret = hosts_module.action(
+ action='upgrade',
+ action_condition=lambda h: h.update_available,
+ wait_condition=lambda h: h.status == hoststate.UP,
+ fail_condition=failed_state,
+ )
+ elif state == 'started':
+ ret = hosts_module.action(
+ action='fence',
+ action_condition=lambda h: h.status == hoststate.DOWN,
+ wait_condition=lambda h: h.status in [hoststate.UP, hoststate.MAINTENANCE],
+ fail_condition=failed_state,
+ fence_type='start',
+ )
+ elif state == 'stopped':
+ hosts_module.action(
+ action='deactivate',
+ action_condition=lambda h: h.status not in [hoststate.MAINTENANCE, hoststate.DOWN],
+ wait_condition=lambda h: h.status in [hoststate.MAINTENANCE, hoststate.DOWN],
+ fail_condition=failed_state,
+ )
+ ret = hosts_module.action(
+ action='fence',
+ action_condition=lambda h: h.status != hoststate.DOWN,
+ wait_condition=lambda h: h.status == hoststate.DOWN,
+ fail_condition=failed_state,
+ fence_type='stop',
+ )
+ elif state == 'restarted':
+ ret = hosts_module.action(
+ action='fence',
+ wait_condition=lambda h: h.status == hoststate.UP,
+ fail_condition=failed_state,
+ fence_type='restart',
+ )
+
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+ finally:
+ connection.close(logout=False)
+
+
+from ansible.module_utils.basic import *
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py
new file mode 100644
index 0000000000..ad1945e538
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_hosts_facts.py
@@ -0,0 +1,99 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+ import ovirtsdk4 as sdk
+except ImportError:
+ pass
+
+from ansible.module_utils.ovirt import *
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_hosts_facts
+short_description: Retrieve facts about one or more oVirt hosts
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+ - "Retrieve facts about one or more oVirt hosts."
+notes:
+ - "This module creates a new top-level C(ovirt_hosts) fact, which
+ contains a list of hosts."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt search backend."
+ - "For example to search host X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all hosts which names start with C(host) and
+# belong to data center C(west):
+- ovirt_hosts_facts:
+ pattern: name=host* and datacenter=west
+- debug:
+ var: ovirt_hosts
+'''
+
+RETURN = '''
+ovirt_hosts:
+    description: "List of dictionaries describing the hosts. Host attributes are mapped to dictionary keys,
+ all hosts attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/host."
+ returned: On success.
+ type: list
+'''
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ connection = create_connection(module.params.pop('auth'))
+ hosts_service = connection.system_service().hosts_service()
+ hosts = hosts_service.list(search=module.params['pattern'])
+ module.exit_json(
+ changed=False,
+ ansible_facts=dict(
+ ovirt_hosts=[
+ get_dict_of_struct(c) for c in hosts
+ ],
+ ),
+ )
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+from ansible.module_utils.basic import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_mac_pools.py b/lib/ansible/modules/cloud/ovirt/ovirt_mac_pools.py
new file mode 100644
index 0000000000..622f57d89d
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_mac_pools.py
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ equal,
+ create_connection,
+ ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_mac_pools
+short_description: Module to manage MAC pools in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "This module manage MAC pools in oVirt."
+options:
+ name:
+ description:
+            - "Name of the MAC pool to manage."
+ required: true
+ description:
+ description:
+ - "Description of the MAC pool."
+ state:
+ description:
+ - "Should the mac pool be present or absent."
+ choices: ['present', 'absent']
+ default: present
+ allow_duplicates:
+ description:
+            - "If I(true), allow a MAC address to be used multiple times in a pool."
+ - "Default value is set by oVirt engine to I(false)."
+ ranges:
+ description:
+            - "List of MAC ranges. The from and to should be split by comma."
+ - "For example: 00:1a:4a:16:01:51,00:1a:4a:16:01:61"
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create MAC pool:
+- ovirt_mac_pools:
+ name: mymacpool
+ allow_duplicates: false
+ ranges:
+ - 00:1a:4a:16:01:51,00:1a:4a:16:01:61
+ - 00:1a:4a:16:02:51,00:1a:4a:16:02:61
+
+# Remove MAC pool:
+- ovirt_mac_pools:
+ state: absent
+ name: mymacpool
+'''
+
+RETURN = '''
+id:
+ description: ID of the MAC pool which is managed
+ returned: On success if MAC pool is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+template:
+ description: "Dictionary of all the MAC pool attributes. MAC pool attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/mac_pool."
+ returned: On success if MAC pool is found.
+'''
+
+
+class MACPoolModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.MacPool(
+ name=self._module.params['name'],
+ allow_duplicates=self._module.params['allow_duplicates'],
+ description=self._module.params['description'],
+ ranges=[
+ otypes.Range(
+ from_=mac_range.split(',')[0],
+ to=mac_range.split(',')[1],
+ )
+ for mac_range in self._module.params['ranges']
+ ],
+ )
+
+ def _compare_ranges(self, entity):
+ if self._module.params['ranges'] is not None:
+ ranges = sorted([
+ '%s,%s' % (mac_range.from_, mac_range.to)
+ for mac_range in entity.ranges
+ ])
+ return equal(sorted(self._module.params['ranges']), ranges)
+
+ return True
+
+ def update_check(self, entity):
+ return (
+ self._compare_ranges(entity) and
+ equal(self._module.params['allow_duplicates'], entity.allow_duplicates) and
+ equal(self._module.params['description'], entity.description)
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ name=dict(default=None, required=True),
+ allow_duplicates=dict(default=None, type='bool'),
+ description=dict(default=None),
+ ranges=dict(default=None, type='list'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+
+ try:
+ connection = create_connection(module.params.pop('auth'))
+ mac_pools_service = connection.system_service().mac_pools_service()
+ mac_pools_module = MACPoolModule(
+ connection=connection,
+ module=module,
+ service=mac_pools_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = mac_pools_module.create()
+ elif state == 'absent':
+ ret = mac_pools_module.remove()
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_networks.py b/lib/ansible/modules/cloud/ovirt/ovirt_networks.py
new file mode 100644
index 0000000000..047a24d388
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_networks.py
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_params,
+ create_connection,
+ equal,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_networks
+short_description: Module to manage logical networks in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage logical networks in oVirt"
+options:
+ name:
+ description:
+            - "Name of the network to manage."
+ required: true
+ state:
+ description:
+ - "Should the network be present or absent"
+ choices: ['present', 'absent']
+ default: present
+ datacenter:
+ description:
+ - "Datacenter name where network reside."
+ description:
+ description:
+ - "Description of the network."
+ comment:
+ description:
+ - "Comment of the network."
+ vlan_tag:
+ description:
+ - "Specify VLAN tag."
+ vm_network:
+ description:
+ - "If I(True) network will be marked as network for VM."
+ - "VM network carries traffic relevant to the virtual machine."
+ mtu:
+ description:
+ - "Maximum transmission unit (MTU) of the network."
+ clusters:
+ description:
+ - "List of dictionaries describing how the network is managed in specific cluster."
+ - "C(name) - Cluster name."
+ - "C(assigned) - I(true) if the network should be assigned to cluster. Default is I(true)."
+ - "C(required) - I(true) if the network must remain operational for all hosts associated with this network."
+            - "C(display) - I(true) if the network should be marked as display network."
+            - "C(migration) - I(true) if the network should be marked as migration network."
+            - "C(gluster) - I(true) if the network should be marked as gluster network."
+
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create network
+- ovirt_networks:
+ datacenter: mydatacenter
+ name: mynetwork
+ vlan_tag: 1
+ vm_network: true
+
+# Remove network
+- ovirt_networks:
+ state: absent
+ name: mynetwork
+'''
+
+RETURN = '''
+id:
+ description: "ID of the managed network"
+ returned: "On success if network is found."
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+network:
+ description: "Dictionary of all the network attributes. Network attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/network."
+ returned: "On success if network is found."
+'''
+
+
+class NetworksModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.Network(
+ name=self._module.params['name'],
+ comment=self._module.params['comment'],
+ description=self._module.params['description'],
+ data_center=otypes.DataCenter(
+ name=self._module.params['datacenter'],
+ ) if self._module.params['datacenter'] else None,
+ vlan=otypes.Vlan(
+ self._module.params['vlan_tag'],
+ ) if self._module.params['vlan_tag'] else None,
+ usages=[
+ otypes.NetworkUsage.VM if self._module.params['vm_network'] else None
+ ] if self._module.params['vm_network'] is not None else None,
+ mtu=self._module.params['mtu'],
+ )
+
+ def update_check(self, entity):
+ return (
+ equal(self._module.params.get('comment'), entity.comment) and
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self._module.params.get('vlan_tag'), getattr(entity.vlan, 'id', None)) and
+ equal(self._module.params.get('vm_network'), True if entity.usages else False) and
+ equal(self._module.params.get('mtu'), entity.mtu)
+ )
+
+
+class ClusterNetworksModule(BaseModule):
+
+ def __init__(self, network_id, cluster_network, *args, **kwargs):
+ super(ClusterNetworksModule, self).__init__(*args, **kwargs)
+ self._network_id = network_id
+ self._cluster_network = cluster_network
+
+ def build_entity(self):
+ return otypes.Network(
+ id=self._network_id,
+ name=self._module.params['name'],
+ required=self._cluster_network.get('required'),
+ display=self._cluster_network.get('display'),
+ usages=[
+ otypes.NetworkUsage(usage)
+ for usage in ['display', 'gluster', 'migration']
+ if self._cluster_network.get(usage, False)
+ ] if (
+ self._cluster_network.get('display') is not None or
+ self._cluster_network.get('gluster') is not None or
+ self._cluster_network.get('migration') is not None
+ ) else None,
+ )
+
+ def update_check(self, entity):
+ return (
+ equal(self._cluster_network.get('required'), entity.required) and
+ equal(self._cluster_network.get('display'), entity.display) and
+ equal(
+ sorted([
+ usage
+ for usage in ['display', 'gluster', 'migration']
+ if self._cluster_network.get(usage, False)
+ ]),
+ sorted([
+ str(usage)
+ for usage in getattr(entity, 'usages', [])
+ # VM + MANAGEMENT is part of root network
+ if usage != otypes.NetworkUsage.VM and usage != otypes.NetworkUsage.MANAGEMENT
+ ]),
+ )
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent'],
+ default='present',
+ ),
+ datacenter=dict(default=None, required=True),
+ name=dict(default=None, required=True),
+ description=dict(default=None),
+ comment=dict(default=None),
+ vlan_tag=dict(default=None, type='int'),
+ vm_network=dict(default=None, type='bool'),
+ mtu=dict(default=None, type='int'),
+ clusters=dict(default=None, type='list'),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+ check_params(module)
+
+ try:
+ connection = create_connection(module.params.pop('auth'))
+ clusters_service = connection.system_service().clusters_service()
+ networks_service = connection.system_service().networks_service()
+ networks_module = NetworksModule(
+ connection=connection,
+ module=module,
+ service=networks_service,
+ )
+ state = module.params['state']
+ network = networks_module.search_entity(
+ search_params={
+ 'name': module.params['name'],
+ 'datacenter': module.params['datacenter'],
+ },
+ )
+ if state == 'present':
+ ret = networks_module.create(entity=network)
+
+ # Update clusters networks:
+ for param_cluster in module.params.get('clusters', []):
+ cluster = search_by_name(clusters_service, param_cluster.get('name', None))
+ if cluster is None:
+                    raise Exception("Cluster '%s' was not found." % param_cluster.get('name'))
+ cluster_networks_service = clusters_service.service(cluster.id).networks_service()
+ cluster_networks_module = ClusterNetworksModule(
+ network_id=ret['id'],
+ cluster_network=param_cluster,
+ connection=connection,
+ module=module,
+ service=cluster_networks_service,
+ )
+ if param_cluster.get('assigned', True):
+ ret = cluster_networks_module.create()
+ else:
+ ret = cluster_networks_module.remove()
+
+ elif state == 'absent':
+ ret = networks_module.remove(entity=network)
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_networks_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_networks_facts.py
new file mode 100644
index 0000000000..974acbf95d
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_networks_facts.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_networks_facts
+short_description: Retrieve facts about one or more oVirt networks
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+ - "Retrieve facts about one or more oVirt networks."
+notes:
+ - "This module creates a new top-level C(ovirt_networks) fact, which
+ contains a list of networks."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt search backend."
+ - "For example to search network starting with string vlan1 use: name=vlan1*"
+extends_documentation_fragment: ovirt
+'''
+
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all networks which names start with C(vlan1):
+- ovirt_networks_facts:
+ pattern: name=vlan1*
+- debug:
+ var: ovirt_networks
+'''
+
+
+RETURN = '''
+ovirt_networks:
+ description: "List of dictionaries describing the networks. Network attributes are mapped to dictionary keys,
+ all networks attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/network."
+ returned: On success.
+ type: list
+'''
+
+
def main():
    """Entry point: gather ovirt_networks facts matching the search pattern."""
    argument_spec = ovirt_full_argument_spec(
        pattern=dict(default='', required=False),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    # Initialize before the try block so the finally clause can safely
    # close the connection even when create_connection() itself fails.
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        networks_service = connection.system_service().networks_service()
        networks = networks_service.list(search=module.params['pattern'])
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_networks=[
                    get_dict_of_struct(c) for c in networks
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Guard: create_connection() may have failed before assignment.
        if connection is not None:
            connection.close(logout=False)


if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_nics.py b/lib/ansible/modules/cloud/ovirt/ovirt_nics.py
new file mode 100644
index 0000000000..f0513503a9
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_nics.py
@@ -0,0 +1,247 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ get_link_name,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_nics
+short_description: Module to manage network interfaces of Virtual Machines in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage network interfaces of Virtual Machines in oVirt."
+options:
+ name:
+ description:
+ - "Name of the network interface to manage."
+ required: true
+ vm:
+ description:
+ - "Name of the Virtual Machine to manage."
+ required: true
+ state:
+ description:
+ - "Should the Virtual Machine NIC be present/absent/plugged/unplugged."
+ choices: ['present', 'absent', 'plugged', 'unplugged']
+ default: present
+ network:
+ description:
+ - "Logical network to which the VM network interface should use,
+ by default Empty network is used if network is not specified."
+ profile:
+ description:
+ - "Virtual network interface profile to be attached to VM network interface."
+ interface:
+ description:
+ - "Type of the network interface."
+ choices: ['virtio', 'e1000', 'rtl8139', 'pci_passthrough', 'rtl8139_virtio', 'spapr_vlan']
+ default: 'virtio'
+ mac_address:
+ description:
+ - "Custom MAC address of the network interface, by default it's obtained from MAC pool."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add NIC to VM
+- ovirt_nics:
+ state: present
+ vm: myvm
+ name: mynic
+ interface: e1000
+ mac_address: 00:1a:4a:16:01:56
+ profile: ovirtmgmt
+ network: ovirtmgmt
+
+# Plug NIC to VM
+- ovirt_nics:
+ state: plugged
+ vm: myvm
+ name: mynic
+
+# Unplug NIC from VM
+- ovirt_nics:
+ state: unplugged
+ vm: myvm
+ name: mynic
+
+# Remove NIC from VM
+- ovirt_nics:
+ state: absent
+ vm: myvm
+ name: mynic
+'''
+
+RETURN = '''
+id:
+ description: ID of the network interface which is managed
+ returned: On success if network interface is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+nic:
+ description: "Dictionary of all the network interface attributes. Network interface attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/nic."
+ returned: On success if network interface is found.
+'''
+
+
class VmNicsModule(BaseModule):
    """Encapsulates build/compare logic for a VM network interface (NIC)."""

    def __init__(self, *args, **kwargs):
        super(VmNicsModule, self).__init__(*args, **kwargs)
        # Resolved vNIC profile id; filled in by the caller once the
        # profile/network pair has been looked up.
        self.vnic_id = None

    @property
    def vnic_id(self):
        return self._vnic_id

    @vnic_id.setter
    def vnic_id(self, vnic_id):
        self._vnic_id = vnic_id

    def build_entity(self):
        """Translate module parameters into an otypes.Nic instance."""
        params = self._module.params
        interface = params.get('interface')
        mac_address = params.get('mac_address')
        return otypes.Nic(
            name=params.get('name'),
            interface=otypes.NicInterface(interface) if interface else None,
            vnic_profile=otypes.VnicProfile(id=self.vnic_id) if self.vnic_id else None,
            mac=otypes.Mac(address=mac_address) if mac_address else None,
        )

    def update_check(self, entity):
        """Return True when the existing NIC already matches the request."""
        params = self._module.params
        return (
            equal(params.get('interface'), str(entity.interface)) and
            equal(params.get('profile'), get_link_name(self._connection, entity.vnic_profile)) and
            equal(params.get('mac_address'), entity.mac.address)
        )
+
+
def main():
    """Entry point: manage (add/remove/plug/unplug) a NIC of an oVirt VM."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent', 'plugged', 'unplugged'],
            default='present'
        ),
        vm=dict(required=True),
        name=dict(required=True),
        interface=dict(default=None),
        profile=dict(default=None),
        network=dict(default=None),
        mac_address=dict(default=None),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)

    # Initialize before the try block so the finally clause can safely
    # close the connection even when create_connection() itself fails.
    connection = None
    try:
        # Locate the service that manages the virtual machines and use it to
        # search for the NIC:
        connection = create_connection(module.params.pop('auth'))
        vms_service = connection.system_service().vms_service()

        # Locate the VM, where we will manage NICs:
        vm_name = module.params.get('vm')
        vm = search_by_name(vms_service, vm_name)
        if vm is None:
            raise Exception("VM '%s' was not found." % vm_name)

        # Locate the service that manages the virtual machines NICs:
        vm_service = vms_service.vm_service(vm.id)
        nics_service = vm_service.nics_service()
        vmnics_module = VmNicsModule(
            connection=connection,
            module=module,
            service=nics_service,
        )

        # Find vNIC id of the network interface (if any):
        profile = module.params.get('profile')
        if profile and module.params['network']:
            cluster_name = get_link_name(connection, vm.cluster)
            dcs_service = connection.system_service().data_centers_service()
            dc = dcs_service.list(search='Clusters.name=%s' % cluster_name)[0]
            networks_service = dcs_service.service(dc.id).networks_service()
            network = search_by_name(networks_service, module.params['network'])
            for vnic in connection.system_service().vnic_profiles_service().list():
                if vnic.name == profile and vnic.network.id == network.id:
                    vmnics_module.vnic_id = vnic.id

        # Handle appropriate action:
        state = module.params['state']
        if state == 'present':
            ret = vmnics_module.create()
        elif state == 'absent':
            ret = vmnics_module.remove()
        elif state == 'plugged':
            # Ensure the NIC exists before trying to activate it:
            vmnics_module.create()
            ret = vmnics_module.action(
                action='activate',
                action_condition=lambda nic: not nic.plugged,
                wait_condition=lambda nic: nic.plugged,
            )
        elif state == 'unplugged':
            # Ensure the NIC exists before trying to deactivate it:
            vmnics_module.create()
            ret = vmnics_module.action(
                action='deactivate',
                action_condition=lambda nic: nic.plugged,
                wait_condition=lambda nic: not nic.plugged,
            )

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Guard: create_connection() may have failed before assignment.
        if connection is not None:
            connection.close(logout=False)


if __name__ == "__main__":
    main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_nics_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_nics_facts.py
new file mode 100644
index 0000000000..ab5fcdad72
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_nics_facts.py
@@ -0,0 +1,122 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_nics_facts
+short_description: Retrieve facts about one or more oVirt virtual machine network interfaces
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+ - "Retrieve facts about one or more oVirt virtual machine network interfaces."
+notes:
+ - "This module creates a new top-level C(ovirt_nics) fact, which
+ contains a list of NICs."
+options:
+ vm:
+ description:
+ - "Name of the VM where NIC is attached."
+ required: true
+ name:
+ description:
+ - "Name of the NIC, can be used as glob expression."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all NICs which names start with C(eth) for VM named C(centos7):
+- ovirt_nics_facts:
+ vm: centos7
+ name: eth*
+- debug:
+ var: ovirt_nics
+'''
+
+RETURN = '''
+ovirt_nics:
+ description: "List of dictionaries describing the network interfaces. NIC attributes are mapped to dictionary keys,
+ all NICs attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/nic."
+ returned: On success.
+ type: list
+'''
+
+
def main():
    """Entry point: gather ovirt_nics facts for the NICs of a VM."""
    argument_spec = ovirt_full_argument_spec(
        vm=dict(required=True),
        name=dict(default=None),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    # Initialize before the try block so the finally clause can safely
    # close the connection even when create_connection() itself fails.
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        vms_service = connection.system_service().vms_service()
        vm_name = module.params['vm']
        vm = search_by_name(vms_service, vm_name)
        if vm is None:
            raise Exception("VM '%s' was not found." % vm_name)

        nics_service = vms_service.service(vm.id).nics_service()
        if module.params['name']:
            # 'name' may be a glob expression (e.g. 'eth*'):
            nics = [
                e for e in nics_service.list()
                if fnmatch.fnmatch(e.name, module.params['name'])
            ]
        else:
            nics = nics_service.list()

        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_nics=[
                    get_dict_of_struct(c) for c in nics
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Guard: create_connection() may have failed before assignment.
        if connection is not None:
            connection.close(logout=False)


if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_permissions.py b/lib/ansible/modules/cloud/ovirt/ovirt_permissions.py
new file mode 100644
index 0000000000..6ea833599a
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_permissions.py
@@ -0,0 +1,291 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ follow_link,
+ get_link_name,
+ ovirt_full_argument_spec,
+ search_by_attributes,
+ search_by_name,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_permissions
+short_description: "Module to manage permissions of users/groups in oVirt"
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage permissions of users/groups in oVirt"
+options:
+ role:
+ description:
+ - "Name of the role to be assigned to user/group on specific object."
+ default: UserRole
+ state:
+ description:
+ - "Should the permission be present/absent."
+ choices: ['present', 'absent']
+ default: present
+ object_id:
+ description:
+ - "ID of the object where the permissions should be managed."
+ object_name:
+ description:
+ - "Name of the object where the permissions should be managed."
+ object_type:
+ description:
+ - "The object where the permissions should be managed."
+ default: 'virtual_machine'
+ choices: [
+ 'data_center',
+ 'cluster',
+ 'host',
+ 'storage_domain',
+ 'network',
+ 'disk',
+ 'vm',
+ 'vm_pool',
+ 'template',
+ ]
+ user_name:
+ description:
+ - "Username of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
+ group_name:
+ description:
+ - "Name of the group to manage."
+ authz_name:
+ description:
+ - "Authorization provider of the user/group. In previous versions of oVirt known as domain."
+ required: true
+ aliases: ['domain']
+ namespace:
+ description:
+ - "Namespace of the authorization provider, where user/group resides."
+ required: false
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add user user1 from authorization provider example.com-authz
+- ovirt_permissions:
+ user_name: user1
+ authz_name: example.com-authz
+ object_type: vm
+ object_name: myvm
+ role: UserVmManager
+
+# Remove permission from user
+- ovirt_permissions:
+ state: absent
+ user_name: user1
+ authz_name: example.com-authz
+ object_type: cluster
+ object_name: mycluster
+ role: ClusterAdmin
+'''
+
+RETURN = '''
+id:
+ description: ID of the permission which is managed
+ returned: On success if permission is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+permission:
+ description: "Dictionary of all the permission attributes. Permission attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/permission."
+ returned: On success if permission is found.
+'''
+
+
+def _objects_service(connection, object_type):
+ return getattr(
+ connection.system_service(),
+ '%ss_service' % object_type,
+ None,
+ )()
+
+
def _object_service(connection, module):
    """Resolve the service of the single object whose permissions are managed.

    Prefers an explicit object_id; otherwise looks the object up by name.
    """
    params = module.params
    objects_service = _objects_service(connection, params['object_type'])

    object_id = params['object_id']
    if object_id is not None:
        return objects_service.service(object_id)

    # No explicit id given - fall back to a lookup by name:
    sdk_object = search_by_name(objects_service, params['object_name'])
    if sdk_object is None:
        raise Exception(
            "'%s' object '%s' was not found." % (
                params['object_type'],
                params['object_name']
            )
        )
    return objects_service.service(sdk_object.id)
+
+
def _permission(module, permissions_service, connection):
    """Return the existing permission matching the module parameters, or None."""
    params = module.params
    for permission in permissions_service.list():
        user = follow_link(connection, permission.user)
        # Guard clauses preserve the original short-circuit order, so the
        # group/role links are only followed when the earlier checks pass:
        if not equal(params['user_name'], user.principal if user else None):
            continue
        if not equal(params['group_name'], get_link_name(connection, permission.group)):
            continue
        if not equal(params['role'], get_link_name(connection, permission.role)):
            continue
        return permission
    return None
+
+
class PermissionsModule(BaseModule):
    """Builds the otypes.Permission entity for a user or a group.

    Exactly one of the 'user_name'/'group_name' module parameters is
    expected to be set (main() enforces this before instantiation).
    """

    def _user(self):
        # Resolve the user via the engine search backend, using the fully
        # qualified "name@authz_name" form.
        # NOTE(review): 'usrname' is the user-search attribute understood by
        # the oVirt search backend (not a typo of 'username') -- confirm
        # against the engine's search-syntax documentation.
        user = search_by_attributes(
            self._connection.system_service().users_service(),
            usrname="{name}@{authz_name}".format(
                name=self._module.params['user_name'],
                authz_name=self._module.params['authz_name'],
            ),
        )
        if user is None:
            raise Exception("User '%s' was not found." % self._module.params['user_name'])
        return user

    def _group(self):
        # Fetch candidate groups by name first; further filtering happens
        # client-side below.
        groups = self._connection.system_service().groups_service().list(
            search="name={name}".format(
                name=self._module.params['group_name'],
            )
        )

        # If found more groups, filter them by namespace and authz name:
        # (filtering here, as oVirt backend doesn't support it)
        if len(groups) > 1:
            groups = [
                g for g in groups if (
                    equal(self._module.params['namespace'], g.namespace) and
                    equal(self._module.params['authz_name'], g.domain.name)
                )
            ]
        if not groups:
            raise Exception("Group '%s' was not found." % self._module.params['group_name'])
        return groups[0]

    def build_entity(self):
        """Return the otypes.Permission to be created for the user or group."""
        entity = self._group() if self._module.params['group_name'] else self._user()

        return otypes.Permission(
            user=otypes.User(
                id=entity.id
            ) if self._module.params['user_name'] else None,
            group=otypes.Group(
                id=entity.id
            ) if self._module.params['group_name'] else None,
            role=otypes.Role(
                name=self._module.params['role']
            ),
        )
+
+
def main():
    """Entry point: ensure a user/group permission is present or absent."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        role=dict(default='UserRole'),
        object_type=dict(
            # 'vm' (not 'virtual_machine'): the previous default was not a
            # member of choices and mapped to no existing
            # 'virtual_machines_service' on the API, so the module always
            # failed when object_type was left unset.
            default='vm',
            choices=[
                'data_center',
                'cluster',
                'host',
                'storage_domain',
                'network',
                'disk',
                'vm',
                'vm_pool',
                'template',
            ]
        ),
        authz_name=dict(required=True, aliases=['domain']),
        object_id=dict(default=None),
        object_name=dict(default=None),
        # 'default' (was the misspelled 'rdefault', which the argument
        # spec silently ignores):
        user_name=dict(default=None),
        group_name=dict(default=None),
        namespace=dict(default=None),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)

    if module.params['object_name'] is None and module.params['object_id'] is None:
        module.fail_json(msg='"object_name" or "object_id" is required')

    if module.params['user_name'] is None and module.params['group_name'] is None:
        module.fail_json(msg='"user_name" or "group_name" is required')

    # Initialize before the try block so the finally clause can safely
    # close the connection even when create_connection() itself fails.
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        permissions_service = _object_service(connection, module).permissions_service()
        permissions_module = PermissionsModule(
            connection=connection,
            module=module,
            service=permissions_service,
        )

        permission = _permission(module, permissions_service, connection)
        state = module.params['state']
        if state == 'present':
            ret = permissions_module.create(entity=permission)
        elif state == 'absent':
            ret = permissions_module.remove(entity=permission)

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Guard: create_connection() may have failed before assignment.
        if connection is not None:
            connection.close(logout=False)


if __name__ == "__main__":
    main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_permissions_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_permissions_facts.py
new file mode 100644
index 0000000000..6c855f6296
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_permissions_facts.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+try:
+ import ovirtsdk4 as sdk
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_link_name,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_permissions_facts
+short_description: Retrieve facts about one or more oVirt permissions
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+ - "Retrieve facts about one or more oVirt permissions."
+notes:
+ - "This module creates a new top-level C(ovirt_permissions) fact, which
+ contains a list of permissions."
+options:
+ user_name:
+ description:
+ - "Username of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
+ group_name:
+ description:
+ - "Name of the group to manage."
+ authz_name:
+ description:
+ - "Authorization provider of the user/group. In previous versions of oVirt known as domain."
+ required: true
+ aliases: ['domain']
+ namespace:
+ description:
+ - "Namespace of the authorization provider, where user/group resides."
+ required: false
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all permissions of user with username C(john):
+- ovirt_permissions_facts:
+ user_name: john
+ authz_name: example.com-authz
+- debug:
+ var: ovirt_permissions
+'''
+
+RETURN = '''
+ovirt_permissions:
+ description: "List of dictionaries describing the permissions. Permission attributes are mapped to dictionary keys,
+ all permissions attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/permission."
+ returned: On success.
+ type: list
+'''
+
+
def _permissions_service(connection, module):
    """Return the permissions sub-service of the requested user or group."""
    system = connection.system_service()
    user_name = module.params['user_name']
    if user_name:
        lookup_service, lookup_name = system.users_service(), user_name
    else:
        lookup_service, lookup_name = system.groups_service(), module.params['group_name']

    entity = search_by_name(lookup_service, lookup_name)
    if entity is None:
        raise Exception("User/Group wasn't found.")

    return lookup_service.service(entity.id).permissions_service()
+
+
def main():
    """Entry point: gather ovirt_permissions facts for a user or group."""
    argument_spec = ovirt_full_argument_spec(
        authz_name=dict(required=True, aliases=['domain']),
        # 'default' (was the misspelled 'rdefault', which the argument
        # spec silently ignores):
        user_name=dict(default=None),
        group_name=dict(default=None),
        namespace=dict(default=None),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    # Initialize before the try block so the finally clause can safely
    # close the connection even when create_connection() itself fails.
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        permissions_service = _permissions_service(connection, module)
        permissions = []
        for p in permissions_service.list():
            newperm = dict()
            # Flatten the permission: every linked struct attribute is
            # replaced by the name of the object it points to. SDK struct
            # attributes are underscore-prefixed, hence key[1:].
            for key, value in p.__dict__.items():
                if value and isinstance(value, sdk.Struct):
                    newperm[key[1:]] = get_link_name(connection, value)
            permissions.append(newperm)

        module.exit_json(
            changed=False,
            ansible_facts=dict(ovirt_permissions=permissions),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Guard: create_connection() may have failed before assignment.
        if connection is not None:
            connection.close(logout=False)


if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_quotas.py b/lib/ansible/modules/cloud/ovirt/ovirt_quotas.py
new file mode 100644
index 0000000000..d9b94afa20
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_quotas.py
@@ -0,0 +1,298 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ get_link_name,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_quotas
+short_description: Module to manage datacenter quotas in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage datacenter quotas in oVirt"
+options:
+ name:
+ description:
+ - "Name of the quota to manage."
+ required: true
+ state:
+ description:
+ - "Should the quota be present/absent."
+ choices: ['present', 'absent']
+ default: present
+ datacenter:
+ description:
+ - "Name of the datacenter where quota should be managed."
+ required: true
+ description:
+ description:
+ - "Description of the quota to manage."
+ cluster_threshold:
+ description:
+ - "Cluster threshold(soft limit) defined in percentage (0-100)."
+ cluster_grace:
+ description:
+ - "Cluster grace(hard limit) defined in percentage (1-100)."
+ storage_threshold:
+ description:
+ - "Storage threshold(soft limit) defined in percentage (0-100)."
+ storage_grace:
+ description:
+ - "Storage grace(hard limit) defined in percentage (1-100)."
+ clusters:
+ description:
+ - "List of dictionary of cluster limits, which is valid to specific cluster."
+ - "If cluster isn't specified it's valid to all clusters in system:"
+ - "C(cluster) - Name of the cluster."
+ - "C(memory) - Memory limit (in GiB)."
+ - "C(cpu) - CPU limit."
+ storages:
+ description:
+ - "List of dictionary of storage limits, which is valid to specific storage."
+ - "If storage isn't specified it's valid to all storages in system:"
+ - "C(storage) - Name of the storage."
+ - "C(size) - Size limit (in GiB)."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add cluster quota to cluster cluster1 with memory limit 20GiB and CPU limit to 10:
+ovirt_quotas:
+ name: quota1
+ datacenter: dcX
+ clusters:
+ - name: cluster1
+ memory: 20
+ cpu: 10
+
+# Add cluster quota to all clusters with memory limit 30GiB and CPU limit to 15:
+ovirt_quotas:
+ name: quota2
+ datacenter: dcX
+ clusters:
+ - memory: 30
+ cpu: 15
+
+# Add storage quota to storage data1 with size limit to 100GiB
+ovirt_quotas:
+ name: quota3
+ datacenter: dcX
+ storage_grace: 40
+ storage_threshold: 60
+ storages:
+ - name: data1
+ size: 100
+
+# Remove quota quota1 (Note the quota must not be assigned to any VM/disk):
+ovirt_quotas:
+ state: absent
+ datacenter: dcX
+ name: quota1
+'''
+
+RETURN = '''
+id:
+ description: ID of the quota which is managed
+ returned: On success if quota is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+quota:
+ description: "Dictionary of all the quota attributes. Quota attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/quota."
+ returned: On success if quota is found.
+'''
+
+
class QuotasModule(BaseModule):
    """Builds and compares otypes.Quota entities of a datacenter."""

    def build_entity(self):
        """Translate module parameters into an otypes.Quota instance."""
        return otypes.Quota(
            description=self._module.params['description'],
            name=self._module.params['name'],
            # 'grace' maps to the hard limit, 'threshold' to the soft limit:
            storage_hard_limit_pct=self._module.params.get('storage_grace'),
            storage_soft_limit_pct=self._module.params.get('storage_threshold'),
            cluster_hard_limit_pct=self._module.params.get('cluster_grace'),
            cluster_soft_limit_pct=self._module.params.get('cluster_threshold'),
        )

    def update_storage_limits(self, entity):
        """Compare requested vs. existing storage limits of *entity*.

        Returns True when they already match. NOTE: as a side effect this
        removes every existing storage limit so main() can re-create them
        (see the FIXME in update_check).
        """
        new_limits = {}
        for storage in self._module.params.get('storages'):
            new_limits[storage.get('name', '')] = {
                'size': storage.get('size'),
            }

        old_limits = {}
        sd_limit_service = self._service.service(entity.id).quota_storage_limits_service()
        for limit in sd_limit_service.list():
            # A limit without a storage domain applies to all storages ('').
            storage = get_link_name(self._connection, limit.storage_domain) if limit.storage_domain else ''
            old_limits[storage] = {
                'size': limit.limit,
            }
            sd_limit_service.service(limit.id).remove()

        return new_limits == old_limits

    def update_cluster_limits(self, entity):
        """Compare requested vs. existing cluster limits of *entity*.

        Returns True when they already match. NOTE: as a side effect this
        removes every existing cluster limit so main() can re-create them
        (see the FIXME in update_check).
        """
        new_limits = {}
        for cluster in self._module.params.get('clusters'):
            new_limits[cluster.get('name', '')] = {
                'cpu': cluster.get('cpu'),
                # assumes every cluster entry carries 'memory'; float(None)
                # would raise a TypeError -- TODO confirm with callers.
                'memory': float(cluster.get('memory')),
            }

        old_limits = {}
        cl_limit_service = self._service.service(entity.id).quota_cluster_limits_service()
        for limit in cl_limit_service.list():
            # A limit without a cluster applies to all clusters ('').
            cluster = get_link_name(self._connection, limit.cluster) if limit.cluster else ''
            old_limits[cluster] = {
                'cpu': limit.vcpu_limit,
                'memory': limit.memory_limit,
            }
            cl_limit_service.service(limit.id).remove()

        return new_limits == old_limits

    def update_check(self, entity):
        """Return True when the existing quota already matches the request."""
        # -- FIXME --
        # Note that we here always remove all cluster/storage limits, because
        # it's not currently possible to update them and then re-create the limits
        # appropriately, this shouldn't have any side-effects, but it's not considered
        # as a correct approach.
        # This feature is tracked here: https://bugzilla.redhat.com/show_bug.cgi?id=1398576
        #

        return (
            self.update_storage_limits(entity) and
            self.update_cluster_limits(entity) and
            equal(self._module.params.get('description'), entity.description) and
            equal(self._module.params.get('storage_grace'), entity.storage_hard_limit_pct) and
            equal(self._module.params.get('storage_threshold'), entity.storage_soft_limit_pct) and
            equal(self._module.params.get('cluster_grace'), entity.cluster_hard_limit_pct) and
            equal(self._module.params.get('cluster_threshold'), entity.cluster_soft_limit_pct)
        )
+
+
def main():
    """Entry point: ensure a datacenter quota is present or absent."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(required=True),
        datacenter=dict(required=True),
        description=dict(default=None),
        cluster_threshold=dict(default=None, type='int', aliases=['cluster_soft_limit']),
        cluster_grace=dict(default=None, type='int', aliases=['cluster_hard_limit']),
        storage_threshold=dict(default=None, type='int', aliases=['storage_soft_limit']),
        storage_grace=dict(default=None, type='int', aliases=['storage_hard_limit']),
        clusters=dict(default=[], type='list'),
        storages=dict(default=[], type='list'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)

    # Initialize before the try block so the finally clause can safely
    # close the connection even when create_connection() itself fails.
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        datacenters_service = connection.system_service().data_centers_service()
        dc_name = module.params['datacenter']
        dc_id = getattr(search_by_name(datacenters_service, dc_name), 'id', None)
        if dc_id is None:
            raise Exception("Datacenter '%s' was not found." % dc_name)

        quotas_service = datacenters_service.service(dc_id).quotas_service()
        quotas_module = QuotasModule(
            connection=connection,
            module=module,
            service=quotas_service,
        )

        state = module.params['state']
        if state == 'present':
            ret = quotas_module.create()

            # Manage cluster limits:
            cl_limit_service = quotas_service.service(ret['id']).quota_cluster_limits_service()
            for cluster in module.params.get('clusters'):
                # Guard: a cluster entry may specify only 'cpu'; float(None)
                # would raise a TypeError.
                memory = cluster.get('memory')
                cl_limit_service.add(
                    limit=otypes.QuotaClusterLimit(
                        memory_limit=float(memory) if memory is not None else None,
                        vcpu_limit=cluster.get('cpu'),
                        cluster=search_by_name(
                            connection.system_service().clusters_service(),
                            cluster.get('name')
                        ),
                    ),
                )

            # Manage storage limits:
            sd_limit_service = quotas_service.service(ret['id']).quota_storage_limits_service()
            for storage in module.params.get('storages'):
                sd_limit_service.add(
                    limit=otypes.QuotaStorageLimit(
                        limit=storage.get('size'),
                        storage_domain=search_by_name(
                            connection.system_service().storage_domains_service(),
                            storage.get('name')
                        ),
                    )
                )

        elif state == 'absent':
            ret = quotas_module.remove()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Guard: create_connection() may have failed before assignment.
        if connection is not None:
            connection.close(logout=False)


if __name__ == "__main__":
    main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_quotas_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_quotas_facts.py
new file mode 100644
index 0000000000..4553f64d39
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_quotas_facts.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import fnmatch
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_full_argument_spec,
+ search_by_name,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_quotas_facts
+short_description: Retrieve facts about one or more oVirt quotas
+version_added: "2.3"
+description:
+ - "Retrieve facts about one or more oVirt quotas."
+notes:
+ - "This module creates a new top-level C(ovirt_quotas) fact, which
+ contains a list of quotas."
+options:
+ datacenter:
+ description:
+ - "Name of the datacenter where quota resides."
+ required: true
+ name:
+ description:
+ - "Name of the quota, can be used as glob expression."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about quota named C(myquota) in Default datacenter:
+- ovirt_quotas_facts:
+ datacenter: Default
+ name: myquota
+- debug:
+ var: ovirt_quotas
+'''
+
+RETURN = '''
+ovirt_quotas:
+  description: "List of dictionaries describing the quotas. Quota attributes are mapped to dictionary keys,
+ all quotas attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/quota."
+ returned: On success.
+ type: list
+'''
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ datacenter=dict(required=True),
+ name=dict(default=None),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ connection = create_connection(module.params.pop('auth'))
+ datacenters_service = connection.system_service().data_centers_service()
+ dc_name = module.params['datacenter']
+ dc = search_by_name(datacenters_service, dc_name)
+ if dc is None:
+ raise Exception("Datacenter '%s' was not found." % dc_name)
+
+ quotas_service = datacenters_service.service(dc.id).quotas_service()
+ if module.params['name']:
+ quotas = [
+ e for e in quotas_service.list()
+ if fnmatch.fnmatch(e.name, module.params['name'])
+ ]
+ else:
+ quotas = quotas_service.list()
+
+ module.exit_json(
+ changed=False,
+ ansible_facts=dict(
+ ovirt_quotas=[
+ get_dict_of_struct(c) for c in quotas
+ ],
+ ),
+ )
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_storage_domains.py b/lib/ansible/modules/cloud/ovirt/ovirt_storage_domains.py
new file mode 100644
index 0000000000..cfdd123038
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_storage_domains.py
@@ -0,0 +1,444 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+ import ovirtsdk4.types as otypes
+
+ from ovirtsdk4.types import StorageDomainStatus as sdstate
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ ovirt_full_argument_spec,
+ search_by_name,
+ wait,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_domains
+short_description: Module to manage storage domains in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage storage domains in oVirt"
+options:
+ name:
+ description:
+            - "Name of the storage domain to manage."
+ state:
+ description:
+ - "Should the storage domain be present/absent/maintenance/unattached"
+ choices: ['present', 'absent', 'maintenance', 'unattached']
+ default: present
+ description:
+ description:
+ - "Description of the storage domain."
+ comment:
+ description:
+ - "Comment of the storage domain."
+ data_center:
+ description:
+ - "Data center name where storage domain should be attached."
+ domain_function:
+ description:
+ - "Function of the storage domain."
+ choices: ['data', 'iso', 'export']
+ default: 'data'
+ aliases: ['type']
+ host:
+ description:
+ - "Host to be used to mount storage."
+ nfs:
+ description:
+ - "Dictionary with values for NFS storage type:"
+ - "C(address) - Address of the NFS server. E.g.: myserver.mydomain.com"
+ - "C(path) - Path of the mount point. E.g.: /path/to/my/data"
+ iscsi:
+ description:
+ - "Dictionary with values for iSCSI storage type:"
+ - "C(address) - Address of the iSCSI storage server."
+ - "C(port) - Port of the iSCSI storage server."
+ - "C(target) - iSCSI target."
+ - "C(lun_id) - LUN id."
+ - "C(username) - Username to be used to access storage server."
+ - "C(password) - Password of the user to be used to access storage server."
+ posixfs:
+ description:
+ - "Dictionary with values for PosixFS storage type:"
+ - "C(path) - Path of the mount point. E.g.: /path/to/my/data"
+ - "C(vfs_type) - Virtual File System type."
+ - "C(mount_options) - Option which will be passed when mounting storage."
+ glusterfs:
+ description:
+ - "Dictionary with values for GlusterFS storage type:"
+ - "C(address) - Address of the NFS server. E.g.: myserver.mydomain.com"
+ - "C(path) - Path of the mount point. E.g.: /path/to/my/data"
+ - "C(mount_options) - Option which will be passed when mounting storage."
+ fcp:
+ description:
+ - "Dictionary with values for fibre channel storage type:"
+ - "C(address) - Address of the fibre channel storage server."
+ - "C(port) - Port of the fibre channel storage server."
+ - "C(lun_id) - LUN id."
+ destroy:
+ description:
+            - "If I(True) storage domain metadata won't be cleaned, and the user has to clean them manually."
+ - "This parameter is relevant only when C(state) is I(absent)."
+ format:
+ description:
+ - "If I(True) storage domain will be removed after removing it from oVirt."
+ - "This parameter is relevant only when C(state) is I(absent)."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add data NFS storage domain
+- ovirt_storage_domains:
+ name: data_nfs
+ host: myhost
+ data_center: mydatacenter
+ nfs:
+ address: 10.34.63.199
+ path: /path/data
+
+# Add data iSCSI storage domain:
+- ovirt_storage_domains:
+ name: data_iscsi
+ host: myhost
+ data_center: mydatacenter
+ iscsi:
+ target: iqn.2016-08-09.domain-01:nickname
+ lun_id: 1IET_000d0002
+ address: 10.34.63.204
+
+# Import export NFS storage domain:
+- ovirt_storage_domains:
+ domain_function: export
+ host: myhost
+ data_center: mydatacenter
+ nfs:
+ address: 10.34.63.199
+ path: /path/export
+
+# Create ISO NFS storage domain
+- ovirt_storage_domains:
+ name: myiso
+ domain_function: iso
+ host: myhost
+ data_center: mydatacenter
+ nfs:
+ address: 10.34.63.199
+ path: /path/iso
+
+# Remove storage domain
+- ovirt_storage_domains:
+ state: absent
+ name: mystorage_domain
+ format: true
+'''
+
+RETURN = '''
+id:
+ description: ID of the storage domain which is managed
+ returned: On success if storage domain is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+storage_domain:
+ description: "Dictionary of all the storage domain attributes. Storage domain attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/storage_domain."
+ returned: On success if storage domain is found.
+'''
+
+
+class StorageDomainModule(BaseModule):
+
+ def _get_storage_type(self):
+ for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp']:
+ if self._module.params.get(sd_type) is not None:
+ return sd_type
+
+ def _get_storage(self):
+ for sd_type in ['nfs', 'iscsi', 'posixfs', 'glusterfs', 'fcp']:
+ if self._module.params.get(sd_type) is not None:
+ return self._module.params.get(sd_type)
+
+ def _login(self, storage_type, storage):
+ if storage_type == 'iscsi':
+ hosts_service = self._connection.system_service().hosts_service()
+ host = search_by_name(hosts_service, self._module.params['host'])
+ hosts_service.host_service(host.id).iscsi_login(
+ iscsi=otypes.IscsiDetails(
+ username=storage.get('username'),
+ password=storage.get('password'),
+ address=storage.get('address'),
+ target=storage.get('target'),
+ ),
+ )
+
+ def build_entity(self):
+ storage_type = self._get_storage_type()
+ storage = self._get_storage()
+ self._login(storage_type, storage)
+
+ return otypes.StorageDomain(
+ name=self._module.params['name'],
+ description=self._module.params['description'],
+ comment=self._module.params['comment'],
+ type=otypes.StorageDomainType(
+ self._module.params['domain_function']
+ ),
+ host=otypes.Host(
+ name=self._module.params['host'],
+ ),
+ storage=otypes.HostStorage(
+ type=otypes.StorageType(storage_type),
+ logical_units=[
+ otypes.LogicalUnit(
+ id=storage.get('lun_id'),
+ address=storage.get('address'),
+ port=storage.get('port', 3260),
+ target=storage.get('target'),
+ username=storage.get('username'),
+ password=storage.get('password'),
+ ),
+ ] if storage_type in ['iscsi', 'fcp'] else None,
+ mount_options=storage.get('mount_options'),
+ vfs_type=storage.get('vfs_type'),
+ address=storage.get('address'),
+ path=storage.get('path'),
+ )
+ )
+
+ def _attached_sds_service(self):
+ # Get data center object of the storage domain:
+ dcs_service = self._connection.system_service().data_centers_service()
+ dc = search_by_name(dcs_service, self._module.params['data_center'])
+ if dc is None:
+ return
+
+ dc_service = dcs_service.data_center_service(dc.id)
+ return dc_service.storage_domains_service()
+
+ def _maintenance(self, storage_domain):
+ attached_sds_service = self._attached_sds_service()
+ if attached_sds_service is None:
+ return
+
+ attached_sd_service = attached_sds_service.storage_domain_service(storage_domain.id)
+ attached_sd = attached_sd_service.get()
+
+ if attached_sd and attached_sd.status != sdstate.MAINTENANCE:
+ if not self._module.check_mode:
+ attached_sd_service.deactivate()
+ self.changed = True
+
+ wait(
+ service=attached_sd_service,
+ condition=lambda sd: sd.status == sdstate.MAINTENANCE,
+ wait=self._module.params['wait'],
+ timeout=self._module.params['timeout'],
+ )
+
+ def _unattach(self, storage_domain):
+ attached_sds_service = self._attached_sds_service()
+ if attached_sds_service is None:
+ return
+
+ attached_sd_service = attached_sds_service.storage_domain_service(storage_domain.id)
+ attached_sd = attached_sd_service.get()
+
+ if attached_sd and attached_sd.status == sdstate.MAINTENANCE:
+ if not self._module.check_mode:
+ # Detach the storage domain:
+ attached_sd_service.remove()
+ self.changed = True
+ # Wait until storage domain is detached:
+ wait(
+ service=attached_sd_service,
+ condition=lambda sd: sd is None,
+ wait=self._module.params['wait'],
+ timeout=self._module.params['timeout'],
+ )
+
+ def pre_remove(self, storage_domain):
+ # Before removing storage domain we need to put it into maintenance state:
+ self._maintenance(storage_domain)
+
+ # Before removing storage domain we need to detach it from data center:
+ self._unattach(storage_domain)
+
+ def post_create_check(self, sd_id):
+ storage_domain = self._service.service(sd_id).get()
+ self._service = self._attached_sds_service()
+
+ # If storage domain isn't attached, attach it:
+ attached_sd_service = self._service.service(storage_domain.id)
+ if attached_sd_service.get() is None:
+ self._service.add(
+ otypes.StorageDomain(
+ id=storage_domain.id,
+ ),
+ )
+ self.changed = True
+ # Wait until storage domain is in maintenance:
+ wait(
+ service=attached_sd_service,
+ condition=lambda sd: sd.status == sdstate.ACTIVE,
+ wait=self._module.params['wait'],
+ timeout=self._module.params['timeout'],
+ )
+
+ def unattached_pre_action(self, storage_domain):
+ self._service = self._attached_sds_service(storage_domain)
+ self._maintenance(self._service, storage_domain)
+
+
+def failed_state(sd):
+ return sd.status in [sdstate.UNKNOWN, sdstate.INACTIVE]
+
+
+def control_state(sd_module):
+ sd = sd_module.search_entity()
+ if sd is None:
+ return
+
+ sd_service = sd_module._service.service(sd.id)
+ if sd.status == sdstate.LOCKED:
+ wait(
+ service=sd_service,
+ condition=lambda sd: sd.status != sdstate.LOCKED,
+ fail_condition=failed_state,
+ )
+
+ if failed_state(sd):
+ raise Exception("Not possible to manage storage domain '%s'." % sd.name)
+ elif sd.status == sdstate.ACTIVATING:
+ wait(
+ service=sd_service,
+ condition=lambda sd: sd.status == sdstate.ACTIVE,
+ fail_condition=failed_state,
+ )
+ elif sd.status == sdstate.DETACHING:
+ wait(
+ service=sd_service,
+ condition=lambda sd: sd.status == sdstate.UNATTACHED,
+ fail_condition=failed_state,
+ )
+ elif sd.status == sdstate.PREPARING_FOR_MAINTENANCE:
+ wait(
+ service=sd_service,
+ condition=lambda sd: sd.status == sdstate.MAINTENANCE,
+ fail_condition=failed_state,
+ )
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent', 'maintenance', 'unattached'],
+ default='present',
+ ),
+ name=dict(required=True),
+ description=dict(default=None),
+ comment=dict(default=None),
+ data_center=dict(required=True),
+ domain_function=dict(choices=['data', 'iso', 'export'], default='data', aliases=['type']),
+ host=dict(default=None),
+ nfs=dict(default=None, type='dict'),
+ iscsi=dict(default=None, type='dict'),
+ posixfs=dict(default=None, type='dict'),
+ glusterfs=dict(default=None, type='dict'),
+ fcp=dict(default=None, type='dict'),
+ destroy=dict(type='bool', default=False),
+ format=dict(type='bool', default=False),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+
+ try:
+ connection = create_connection(module.params.pop('auth'))
+ storage_domains_service = connection.system_service().storage_domains_service()
+ storage_domains_module = StorageDomainModule(
+ connection=connection,
+ module=module,
+ service=storage_domains_service,
+ )
+
+ state = module.params['state']
+ control_state(storage_domains_module)
+ if state == 'absent':
+ ret = storage_domains_module.remove(
+ destroy=module.params['destroy'],
+ format=module.params['format'],
+ host=module.params['host'],
+ )
+ elif state == 'present':
+ sd_id = storage_domains_module.create()['id']
+ storage_domains_module.post_create_check(sd_id)
+ ret = storage_domains_module.action(
+ action='activate',
+ action_condition=lambda s: s.status == sdstate.MAINTENANCE,
+ wait_condition=lambda s: s.status == sdstate.ACTIVE,
+ fail_condition=failed_state,
+ )
+ elif state == 'maintenance':
+ sd_id = storage_domains_module.create()['id']
+ storage_domains_module.post_create_check(sd_id)
+ ret = storage_domains_module.action(
+ action='deactivate',
+ action_condition=lambda s: s.status == sdstate.ACTIVE,
+ wait_condition=lambda s: s.status == sdstate.MAINTENANCE,
+ fail_condition=failed_state,
+ )
+ elif state == 'unattached':
+ ret = storage_domains_module.create()
+ storage_domains_module.pre_remove(
+ storage_domain=storage_domains_service.service(ret['id']).get()
+ )
+ ret['changed'] = storage_domains_module.changed
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_storage_domains_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_storage_domains_facts.py
new file mode 100644
index 0000000000..23431ead50
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_storage_domains_facts.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_storage_domains_facts
+short_description: Retrieve facts about one or more oVirt storage domains
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+ - "Retrieve facts about one or more oVirt storage domains."
+notes:
+ - "This module creates a new top-level C(ovirt_storage_domains) fact, which
+ contains a list of storage domains."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt search backend."
+ - "For example to search storage domain X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all storage domains which names start with C(data) and
+# belong to data center C(west):
+- ovirt_storage_domains_facts:
+ pattern: name=data* and datacenter=west
+- debug:
+ var: ovirt_storage_domains
+'''
+
+RETURN = '''
+ovirt_storage_domains:
+  description: "List of dictionaries describing the storage domains. Storage_domain attributes are mapped to dictionary keys,
+ all storage domains attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/storage_domain."
+ returned: On success.
+ type: list
+'''
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ connection = create_connection(module.params.pop('auth'))
+ storage_domains_service = connection.system_service().storage_domains_service()
+ storage_domains = storage_domains_service.list(search=module.params['pattern'])
+ module.exit_json(
+ changed=False,
+ ansible_facts=dict(
+ ovirt_storage_domains=[
+ get_dict_of_struct(c) for c in storage_domains
+ ],
+ ),
+ )
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_templates.py b/lib/ansible/modules/cloud/ovirt/ovirt_templates.py
new file mode 100644
index 0000000000..831ab906c0
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_templates.py
@@ -0,0 +1,314 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import time
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ create_connection,
+ equal,
+ get_dict_of_struct,
+ get_link_name,
+ ovirt_full_argument_spec,
+ search_by_attributes,
+ search_by_name,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_templates
+short_description: Module to manage virtual machine templates in oVirt
+version_added: "2.3"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "Module to manage virtual machine templates in oVirt."
+options:
+ name:
+ description:
+            - "Name of the template to manage."
+ required: true
+ state:
+ description:
+ - "Should the template be present/absent/exported/imported"
+ choices: ['present', 'absent', 'exported', 'imported']
+ default: present
+ vm:
+ description:
+ - "Name of the VM, which will be used to create template."
+ description:
+ description:
+ - "Description of the template."
+ cpu_profile:
+ description:
+ - "CPU profile to be set to template."
+ cluster:
+ description:
+ - "Name of the cluster, where template should be created/imported."
+ exclusive:
+ description:
+ - "When C(state) is I(exported) this parameter indicates if the existing templates with the
+ same name should be overwritten."
+ export_domain:
+ description:
+ - "When C(state) is I(exported) or I(imported) this parameter specifies the name of the
+ export storage domain."
+ image_provider:
+ description:
+ - "When C(state) is I(imported) this parameter specifies the name of the image provider to be used."
+ image_disk:
+ description:
+ - "When C(state) is I(imported) and C(image_provider) is used this parameter specifies the name of disk
+ to be imported as template."
+ storage_domain:
+ description:
+ - "When C(state) is I(imported) this parameter specifies the name of the destination data storage domain."
+ clone_permissions:
+ description:
+ - "If I(True) then the permissions of the VM (only the direct ones, not the inherited ones)
+ will be copied to the created template."
+ - "This parameter is used only when C(state) I(present)."
+ default: False
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create template from vm
+- ovirt_templates:
+ cluster: Default
+ name: mytemplate
+ vm: rhel7
+ cpu_profile: Default
+ description: Test
+
+# Import template
+- ovirt_templates:
+ state: imported
+ name: mytemplate
+ export_domain: myexport
+ storage_domain: mystorage
+ cluster: mycluster
+
+# Remove template
+- ovirt_templates:
+ state: absent
+ name: mytemplate
+'''
+
+RETURN = '''
+id:
+ description: ID of the template which is managed
+ returned: On success if template is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+template:
+ description: "Dictionary of all the template attributes. Template attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/template."
+ returned: On success if template is found.
+'''
+
+
+class TemplatesModule(BaseModule):
+
+ def build_entity(self):
+ return otypes.Template(
+ name=self._module.params['name'],
+ cluster=otypes.Cluster(
+ name=self._module.params['cluster']
+ ) if self._module.params['cluster'] else None,
+ vm=otypes.Vm(
+ name=self._module.params['vm']
+ ) if self._module.params['vm'] else None,
+ description=self._module.params['description'],
+ cpu_profile=otypes.CpuProfile(
+ id=search_by_name(
+ self._connection.system_service().cpu_profiles_service(),
+ self._module.params['cpu_profile'],
+ ).id
+ ) if self._module.params['cpu_profile'] else None,
+ )
+
+ def update_check(self, entity):
+ return (
+ equal(self._module.params.get('cluster'), get_link_name(self._connection, entity.cluster)) and
+ equal(self._module.params.get('description'), entity.description) and
+ equal(self._module.params.get('cpu_profile'), get_link_name(self._connection, entity.cpu_profile))
+ )
+
+ def _get_export_domain_service(self):
+ provider_name = self._module.params['export_domain'] or self._module.params['image_provider']
+ export_sds_service = self._connection.system_service().storage_domains_service()
+ export_sd = search_by_name(export_sds_service, provider_name)
+ if export_sd is None:
+ raise ValueError(
+ "Export storage domain/Image Provider '%s' wasn't found." % provider_name
+ )
+
+ return export_sds_service.service(export_sd.id)
+
+ def post_export_action(self, entity):
+ self._service = self._get_export_domain_service().templates_service()
+
+ def post_import_action(self, entity):
+ self._service = self._connection.system_service().templates_service()
+
+
+def wait_for_import(module, templates_service):
+ if module.params['wait']:
+ start = time.time()
+ timeout = module.params['timeout']
+ poll_interval = module.params['poll_interval']
+ while time.time() < start + timeout:
+ template = search_by_name(templates_service, module.params['name'])
+ if template:
+ return template
+ time.sleep(poll_interval)
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ state=dict(
+ choices=['present', 'absent', 'exported', 'imported'],
+ default='present',
+ ),
+ name=dict(default=None, required=True),
+ vm=dict(default=None),
+ description=dict(default=None),
+ cluster=dict(default=None),
+ cpu_profile=dict(default=None),
+ disks=dict(default=[], type='list'),
+ clone_permissions=dict(type='bool'),
+ export_domain=dict(default=None),
+ storage_domain=dict(default=None),
+ exclusive=dict(type='bool'),
+ image_provider=dict(default=None),
+ image_disk=dict(default=None),
+ )
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+ check_sdk(module)
+
+ try:
+ connection = create_connection(module.params.pop('auth'))
+ templates_service = connection.system_service().templates_service()
+ templates_module = TemplatesModule(
+ connection=connection,
+ module=module,
+ service=templates_service,
+ )
+
+ state = module.params['state']
+ if state == 'present':
+ ret = templates_module.create(
+ result_state=otypes.TemplateStatus.OK,
+ clone_permissions=module.params['clone_permissions'],
+ )
+ elif state == 'absent':
+ ret = templates_module.remove()
+ elif state == 'exported':
+ template = templates_module.search_entity()
+ export_service = templates_module._get_export_domain_service()
+ export_template = search_by_attributes(export_service.templates_service(), id=template.id)
+
+ ret = templates_module.action(
+ entity=template,
+ action='export',
+ action_condition=lambda t: export_template is None,
+ wait_condition=lambda t: t is not None,
+ post_action=templates_module.post_export_action,
+ storage_domain=otypes.StorageDomain(id=export_service.get().id),
+ exclusive=module.params['exclusive'],
+ )
+ elif state == 'imported':
+ template = templates_module.search_entity()
+ if template:
+ ret = templates_module.create(
+ result_state=otypes.TemplateStatus.OK,
+ )
+ else:
+ kwargs = {}
+ if module.params['image_provider']:
+ kwargs.update(
+ disk=otypes.Disk(
+ name=module.params['image_disk']
+ ),
+ template=otypes.Template(
+ name=module.params['name'],
+ ),
+ import_as_template=True,
+ )
+
+ if module.params['image_disk']:
+ # We need to refresh storage domain to get list of images:
+ templates_module._get_export_domain_service().images_service().list()
+
+ glance_service = connection.system_service().openstack_image_providers_service()
+ image_provider = search_by_name(glance_service, module.params['image_provider'])
+ images_service = glance_service.service(image_provider.id).images_service()
+ else:
+ images_service = templates_module._get_export_domain_service().templates_service()
+ template_name = module.params['image_disk'] or module.params['name']
+ entity = search_by_name(images_service, template_name)
+ if entity is None:
+ raise Exception("Image/template '%s' was not found." % template_name)
+
+ images_service.service(entity.id).import_(
+ storage_domain=otypes.StorageDomain(
+ name=module.params['storage_domain']
+ ) if module.params['storage_domain'] else None,
+ cluster=otypes.Cluster(
+ name=module.params['cluster']
+ ) if module.params['cluster'] else None,
+ **kwargs
+ )
+ template = wait_for_import(module, templates_service)
+ ret = {
+ 'changed': True,
+ 'id': template.id,
+ 'template': get_dict_of_struct(template),
+ }
+
+ module.exit_json(**ret)
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_templates_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_templates_facts.py
new file mode 100644
index 0000000000..4a2c7c0d00
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_templates_facts.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_templates_facts
+short_description: Retrieve facts about one or more oVirt templates
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+ - "Retrieve facts about one or more oVirt templates."
+notes:
+ - "This module creates a new top-level C(ovirt_templates) fact, which
+ contains a list of templates."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt search backend."
+ - "For example to search template X from datacenter Y use following pattern:
+ name=X and datacenter=Y"
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all templates which names start with C(centos) and
+# belongs to data center C(west):
+- ovirt_templates_facts:
+ pattern: name=centos* and datacenter=west
+- debug:
+ var: ovirt_templates
+'''
+
# Documented return values; "attribues" typo fixed.
RETURN = '''
ovirt_templates:
    description: "List of dictionaries describing the templates. Template attributes are mapped to dictionary keys,
                  all templates attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/template."
    returned: On success.
    type: list
'''
+
+
def main():
    """Entry point: gather facts about oVirt templates and exit the module."""
    argument_spec = ovirt_full_argument_spec(
        # Search term passed through verbatim to the oVirt search backend.
        pattern=dict(default='', required=False),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    # Pre-initialize so the finally clause cannot raise NameError when
    # create_connection() itself fails (which would mask the real error).
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        templates_service = connection.system_service().templates_service()
        templates = templates_service.list(search=module.params['pattern'])
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_templates=[
                    get_dict_of_struct(c) for c in templates
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only close a connection that was actually established.
        if connection is not None:
            connection.close(logout=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_users.py b/lib/ansible/modules/cloud/ovirt/ovirt_users.py
new file mode 100644
index 0000000000..4fb4712225
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_users.py
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ BaseModule,
+ check_sdk,
+ check_params,
+ create_connection,
+ ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
# Module documentation; duplicated-word typo ("the the") fixed.
DOCUMENTATION = '''
---
module: ovirt_users
short_description: Module to manage users in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
    - "Module to manage users in oVirt."
options:
    name:
        description:
            - "Name of the user to manage. In most LDAPs it's I(uid) of the user, but in Active Directory you must specify I(UPN) of the user."
        required: true
    state:
        description:
            - "Should the user be present/absent."
        choices: ['present', 'absent']
        default: present
    authz_name:
        description:
            - "Authorization provider of the user. In previous versions of oVirt known as domain."
        required: true
        aliases: ['domain']
extends_documentation_fragment: ovirt
'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Add user user1 from authorization provider example.com-authz
+ovirt_users:
+ name: user1
+ domain: example.com-authz
+
+# Add user user1 from authorization provider example.com-authz
+# In case of Active Directory specify UPN:
+ovirt_users:
+ name: user1@ad2.example.com
+ domain: example.com-authz
+
+# Remove user user1 with authorization provider example.com-authz
+ovirt_users:
+ state: absent
+ name: user1
+ authz_name: example.com-authz
+'''
+
+RETURN = '''
+id:
+ description: ID of the user which is managed
+ returned: On success if user is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+user:
+ description: "Dictionary of all the user attributes. User attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/user."
+ returned: On success if user is found.
+'''
+
+
def username(module):
    """Return the oVirt login name for the module parameters, i.e. ``<name>@<authz_name>``."""
    name = module.params['name']
    provider = module.params['authz_name']
    return '%s@%s' % (name, provider)
+
+
class UsersModule(BaseModule):
    """BaseModule specialization that maps module parameters to an oVirt User entity."""

    def build_entity(self):
        """Build the otypes.User entity from the Ansible module parameters."""
        params = self._module.params
        return otypes.User(
            domain=otypes.Domain(name=params['authz_name']),
            user_name=username(self._module),
            principal=params['name'],
            namespace=params['namespace'],
        )
+
+
def main():
    """Entry point: ensure an oVirt user is present or absent per C(state)."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        name=dict(required=True),
        authz_name=dict(required=True, aliases=['domain']),
        namespace=dict(default=None),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)
    check_params(module)

    # Pre-initialize so the finally clause cannot raise NameError when
    # create_connection() itself fails (which would mask the real error).
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        users_service = connection.system_service().users_service()
        users_module = UsersModule(
            connection=connection,
            module=module,
            service=users_service,
        )

        state = module.params['state']
        if state == 'present':
            ret = users_module.create(
                search_params={
                    # 'usrname' is the oVirt search-backend key (not a typo).
                    'usrname': username(module),
                }
            )
        elif state == 'absent':
            ret = users_module.remove(
                search_params={
                    'usrname': username(module),
                }
            )

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only close a connection that was actually established.
        if connection is not None:
            connection.close(logout=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_users_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_users_facts.py
new file mode 100644
index 0000000000..7d2b04f1fb
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_users_facts.py
@@ -0,0 +1,102 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_users_facts
+short_description: Retrieve facts about one or more oVirt users
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+ - "Retrieve facts about one or more oVirt users."
+notes:
+ - "This module creates a new top-level C(ovirt_users) fact, which
+ contains a list of users."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt search backend."
+ - "For example to search user X use following pattern: name=X"
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all users which first names start with C(john):
+- ovirt_users_facts:
+ pattern: name=john*
+- debug:
+ var: ovirt_users
+'''
+
# Documented return values; "attribues" typo fixed.
RETURN = '''
ovirt_users:
    description: "List of dictionaries describing the users. User attributes are mapped to dictionary keys,
                  all users attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/user."
    returned: On success.
    type: list
'''
+
+
def main():
    """Entry point: gather facts about oVirt users and exit the module."""
    argument_spec = ovirt_full_argument_spec(
        # Search term passed through verbatim to the oVirt search backend.
        pattern=dict(default='', required=False),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    # Pre-initialize so the finally clause cannot raise NameError when
    # create_connection() itself fails (which would mask the real error).
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        users_service = connection.system_service().users_service()
        users = users_service.list(search=module.params['pattern'])
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_users=[
                    get_dict_of_struct(c) for c in users
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only close a connection that was actually established.
        if connection is not None:
            connection.close(logout=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_vmpools.py b/lib/ansible/modules/cloud/ovirt/ovirt_vmpools.py
new file mode 100644
index 0000000000..82e76d91dc
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_vmpools.py
@@ -0,0 +1,220 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ BaseModule,
+ check_params,
+ check_sdk,
+ create_connection,
+ equal,
+ get_link_name,
+ ovirt_full_argument_spec,
+ wait,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
# Module documentation; "the the" typo fixed and C(Automatic) lower-cased to
# match the actual choice value 'automatic'.
DOCUMENTATION = '''
---
module: ovirt_vmpools
short_description: Module to manage VM pools in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
    - "Module to manage VM pools in oVirt."
options:
    name:
        description:
            - "Name of the VM pool to manage."
        required: true
    state:
        description:
            - "Should the VM pool be present/absent."
            - "Note that when C(state) is I(absent) all VMs in VM pool are stopped and removed."
        choices: ['present', 'absent']
        default: present
    template:
        description:
            - "Name of the template, which will be used to create VM pool."
    description:
        description:
            - "Description of the VM pool."
    cluster:
        description:
            - "Name of the cluster, where VM pool should be created."
    type:
        description:
            - "Type of the VM pool. Either manual or automatic."
            - "C(manual) - The administrator is responsible for explicitly returning the virtual machine to the pool.
               The virtual machine reverts to the original base image after the administrator returns it to the pool."
            - "C(automatic) - When the virtual machine is shut down, it automatically reverts to its base image and
               is returned to the virtual machine pool."
            - "Default value is set by engine."
        choices: ['manual', 'automatic']
    vm_per_user:
        description:
            - "Maximum number of VMs a single user can attach to from this pool."
            - "Default value is set by engine."
    prestarted:
        description:
            - "Number of pre-started VMs defines the number of VMs in run state, that are waiting
               to be attached to Users."
            - "Default value is set by engine."
    vm_count:
        description:
            - "Number of VMs in the pool."
            - "Default value is set by engine."
extends_documentation_fragment: ovirt
'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Create VM pool from template
+- ovirt_vmpools:
+ cluster: mycluster
+ name: myvmpool
+ template: rhel7
+ vm_count: 2
+ prestarted: 2
+ vm_per_user: 1
+
+# Remove vmpool, note that all VMs in pool will be stopped and removed:
+- ovirt_vmpools:
+ state: absent
+ name: myvmpool
+'''
+
+RETURN = '''
+id:
+ description: ID of the VM pool which is managed
+ returned: On success if VM pool is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+vm_pool:
+ description: "Dictionary of all the VM pool attributes. VM pool attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/vm_pool."
+ returned: On success if VM pool is found.
+'''
+
+
class VmPoolsModule(BaseModule):
    """BaseModule specialization that maps module parameters to an oVirt VmPool entity."""

    def build_entity(self):
        """Translate the Ansible module parameters into an otypes.VmPool."""
        params = self._module.params

        # Optional nested entities are only built when their parameter was given:
        cluster = otypes.Cluster(name=params['cluster']) if params['cluster'] else None
        template = otypes.Template(name=params['template']) if params['template'] else None
        pool_type = otypes.VmPoolType(params['type']) if params['type'] else None

        return otypes.VmPool(
            name=params['name'],
            description=params['description'],
            comment=params['comment'],
            cluster=cluster,
            template=template,
            max_user_vms=params['vm_per_user'],
            prestarted_vms=params['prestarted'],
            size=params['vm_count'],
            type=pool_type,
        )

    def update_check(self, entity):
        """Return True when the existing pool already matches every requested parameter."""
        params = self._module.params
        return all((
            equal(params.get('cluster'), get_link_name(self._connection, entity.cluster)),
            equal(params.get('description'), entity.description),
            equal(params.get('comment'), entity.comment),
            equal(params.get('vm_per_user'), entity.max_user_vms),
            equal(params.get('prestarted'), entity.prestarted_vms),
            equal(params.get('vm_count'), entity.size),
        ))
+
+
def main():
    """Entry point: ensure an oVirt VM pool is present or absent per C(state)."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['present', 'absent'],
            default='present',
        ),
        # 'default' is meaningless on a required option, so it is omitted:
        name=dict(required=True),
        template=dict(default=None),
        cluster=dict(default=None),
        description=dict(default=None),
        comment=dict(default=None),
        vm_per_user=dict(default=None, type='int'),
        prestarted=dict(default=None, type='int'),
        vm_count=dict(default=None, type='int'),
        type=dict(default=None, choices=['automatic', 'manual']),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)
    check_params(module)

    # Pre-initialize so the finally clause cannot raise NameError when
    # create_connection() itself fails (which would mask the real error).
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        vm_pools_service = connection.system_service().vm_pools_service()
        vm_pools_module = VmPoolsModule(
            connection=connection,
            module=module,
            service=vm_pools_service,
        )

        state = module.params['state']
        if state == 'present':
            ret = vm_pools_module.create()

            # Wait until every VM of the pool reaches a stable state (DOWN or UP):
            if module.params['wait']:
                vms_service = connection.system_service().vms_service()
                for vm in vms_service.list(search='pool=%s' % module.params['name']):
                    wait(
                        service=vms_service.service(vm.id),
                        condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
                        timeout=module.params['timeout'],
                    )

        elif state == 'absent':
            ret = vm_pools_module.remove()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only close a connection that was actually established.
        if connection is not None:
            connection.close(logout=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_vmpools_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_vmpools_facts.py
new file mode 100644
index 0000000000..fb20a12f83
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_vmpools_facts.py
@@ -0,0 +1,101 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
# Module documentation; the fact name is corrected to C(ovirt_vm_pools), which
# is the key actually registered by main() — the docs/example previously said
# C(ovirt_vmpools), a fact that never exists.
DOCUMENTATION = '''
---
module: ovirt_vmpools_facts
short_description: Retrieve facts about one or more oVirt vmpools
author: "Ondra Machacek (@machacekondra)"
version_added: "2.3"
description:
    - "Retrieve facts about one or more oVirt vmpools."
notes:
    - "This module creates a new top-level C(ovirt_vm_pools) fact, which
       contains a list of vmpools."
options:
    pattern:
      description:
        - "Search term which is accepted by oVirt search backend."
        - "For example to search vmpool X: name=X"
extends_documentation_fragment: ovirt
'''

EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:

# Gather facts about all vm pools which names start with C(centos):
- ovirt_vmpools_facts:
    pattern: name=centos*
- debug:
    var: ovirt_vm_pools
'''
+
# Documented return values; "attribues" typo fixed.
RETURN = '''
ovirt_vm_pools:
    description: "List of dictionaries describing the vmpools. Vm pool attributes are mapped to dictionary keys,
                  all vmpools attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/vm_pool."
    returned: On success.
    type: list
'''
+
+
def main():
    """Entry point: gather facts about oVirt VM pools and exit the module."""
    argument_spec = ovirt_full_argument_spec(
        # Search term passed through verbatim to the oVirt search backend.
        pattern=dict(default='', required=False),
    )
    module = AnsibleModule(argument_spec)
    check_sdk(module)

    # Pre-initialize so the finally clause cannot raise NameError when
    # create_connection() itself fails (which would mask the real error).
    connection = None
    try:
        connection = create_connection(module.params.pop('auth'))
        vmpools_service = connection.system_service().vm_pools_service()
        vmpools = vmpools_service.list(search=module.params['pattern'])
        module.exit_json(
            changed=False,
            ansible_facts=dict(
                ovirt_vm_pools=[
                    get_dict_of_struct(c) for c in vmpools
                ],
            ),
        )
    except Exception as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
    finally:
        # Only close a connection that was actually established.
        if connection is not None:
            connection.close(logout=False)


if __name__ == '__main__':
    main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_vms.py b/lib/ansible/modules/cloud/ovirt/ovirt_vms.py
new file mode 100644
index 0000000000..4edfe0aa59
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_vms.py
@@ -0,0 +1,887 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+try:
+ import ovirtsdk4 as sdk
+ import ovirtsdk4.types as otypes
+except ImportError:
+ pass
+
+from ansible.module_utils.ovirt import *
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_vms
+short_description: "Module to manage Virtual Machines in oVirt."
+version_added: "2.2"
+author: "Ondra Machacek (@machacekondra)"
+description:
+ - "This module manages the whole lifecycle of the Virtual Machine (VM) in oVirt. Since a VM can hold many states in oVirt,
+ please see the notes to learn how the states of the VM are handled."
+options:
+ name:
+ description:
+ - "Name of the Virtual Machine to manage. If the VM doesn't exist, C(name) is required.
+ Otherwise C(id) or C(name) can be used."
+ id:
+ description:
+ - "ID of the Virtual Machine to manage."
+ state:
+ description:
+ - "Should the Virtual Machine be running/stopped/present/absent/suspended/next_run."
+ - "I(present) and I(running) are equal states."
+ - "I(next_run) state updates the VM and if the VM has next run configuration it will be rebooted."
+ - "Please check I(notes) to more detailed description of states."
+ choices: ['running', 'stopped', 'present', 'absent', 'suspended', 'next_run']
+ default: present
+ cluster:
+ description:
+ - "Name of the cluster, where Virtual Machine should be created. Required if creating VM."
+ template:
+ description:
+ - "Name of the template, which should be used to create Virtual Machine. Required if creating VM."
+ - "If template is not specified and VM doesn't exist, VM will be created from I(Blank) template."
+ template_version:
+ description:
+ - "Version number of the template to be used for VM."
+ - "By default the latest available version of the template is used."
+ version_added: "2.3"
+ use_latest_template_version:
+ description:
+ - "Specify if latest template version should be used, when running a stateless VM."
+ - "If this parameter is set to I(true) stateless VM is created."
+ version_added: "2.3"
+ memory:
+ description:
+ - "Amount of memory of the Virtual Machine. Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB)."
+ - "Default value is set by engine."
+ memory_guaranteed:
+ description:
+ - "Amount of minimal guaranteed memory of the Virtual Machine.
+ Prefix uses IEC 60027-2 standard (for example 1GiB, 1024MiB)."
+ - "C(memory_guaranteed) parameter can't be lower than C(memory) parameter. Default value is set by engine."
+ cpu_shares:
+ description:
+ - "Set a CPU shares for this Virtual Machine. Default value is set by oVirt engine."
+ cpu_cores:
+ description:
+ - "Number of virtual CPUs cores of the Virtual Machine. Default value is set by oVirt engine."
+ cpu_sockets:
+ description:
+ - "Number of virtual CPUs sockets of the Virtual Machine. Default value is set by oVirt engine."
+ type:
+ description:
+ - "Type of the Virtual Machine. Default value is set by oVirt engine."
+ choices: [server, desktop]
+ operating_system:
+ description:
+ - "Operating system of the Virtual Machine. Default value is set by oVirt engine."
+ choices: [
+ rhel_6_ppc64, other, freebsd, windows_2003x64, windows_10, rhel_6x64, rhel_4x64, windows_2008x64,
+ windows_2008R2x64, debian_7, windows_2012x64, ubuntu_14_04, ubuntu_12_04, ubuntu_13_10, windows_8x64,
+ other_linux_ppc64, windows_2003, other_linux, windows_10x64, windows_2008, rhel_3, rhel_5, rhel_4,
+ other_ppc64, sles_11, rhel_6, windows_xp, rhel_7x64, freebsdx64, rhel_7_ppc64, windows_7, rhel_5x64,
+ ubuntu_14_04_ppc64, sles_11_ppc64, windows_8, windows_2012R2x64, windows_2008r2x64, ubuntu_13_04,
+ ubuntu_12_10, windows_7x64
+ ]
+ boot_devices:
+ description:
+ - "List of boot devices which should be used to boot. Choices I(network), I(hd) and I(cdrom)."
+ - "For example: ['cdrom', 'hd']. Default value is set by oVirt engine."
+ host:
+ description:
+ - "Specify host where Virtual Machine should be running. By default the host is chosen by engine scheduler."
+ - "This parameter is used only when C(state) is I(running) or I(present)."
+ high_availability:
+ description:
+ - "If I(True) Virtual Machine will be set as highly available."
+ - "If I(False) Virtual Machine won't be set as highly available."
+ - "If no value is passed, default value is set by oVirt engine."
+ delete_protected:
+ description:
+ - "If I(True) Virtual Machine will be set as delete protected."
+ - "If I(False) Virtual Machine won't be set as delete protected."
+ - "If no value is passed, default value is set by oVirt engine."
+ stateless:
+ description:
+ - "If I(True) Virtual Machine will be set as stateless."
+ - "If I(False) Virtual Machine will be unset as stateless."
+ - "If no value is passed, default value is set by oVirt engine."
+ clone:
+ description:
+ - "If I(True) then the disks of the created virtual machine will be cloned and independent of the template."
+ - "This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before."
+ default: False
+ clone_permissions:
+ description:
+ - "If I(True) then the permissions of the template (only the direct ones, not the inherited ones)
+ will be copied to the created virtual machine."
+ - "This parameter is used only when C(state) is I(running) or I(present) and VM didn't exist before."
+ default: False
+ cd_iso:
+ description:
+ - "ISO file from ISO storage domain which should be attached to Virtual Machine."
+ - "If you pass empty string the CD will be ejected from VM."
+ - "If used with C(state) I(running) or I(present) and VM is running the CD will be attached to VM."
+ - "If used with C(state) I(running) or I(present) and VM is down the CD will be attached to VM persistently."
+ force:
+ description:
+ - "Please check to I(Synopsis) to more detailed description of force parameter, it can behave differently
+ in different situations."
+ default: False
+ nics:
+ description:
+ - "List of NICs, which should be attached to Virtual Machine. NIC is described by following dictionary:"
+ - "C(name) - Name of the NIC."
+ - "C(profile_name) - Profile name where NIC should be attached."
+ - "C(interface) - Type of the network interface. One of following: I(virtio), I(e1000), I(rtl8139), default is I(virtio)."
+ - "C(mac_address) - Custom MAC address of the network interface, by default it's obtained from MAC pool."
+ - "C(Note:)"
+ - "This parameter is used only when C(state) is I(running) or I(present) and is able to only create NICs.
+ To manage NICs of the VM in more depth please use M(ovirt_nics) module instead."
+ disks:
+ description:
+ - "List of disks, which should be attached to Virtual Machine. Disk is described by following dictionary:"
+ - "C(name) - Name of the disk. Either C(name) or C(id) is required."
+ - "C(id) - ID of the disk. Either C(name) or C(id) is required."
+ - "C(interface) - Interface of the disk, either I(virtio) or I(IDE), default is I(virtio)."
+ - "C(bootable) - I(True) if the disk should be bootable, default is non bootable."
+ - "C(activate) - I(True) if the disk should be activated, default is activated."
+ - "C(Note:)"
+ - "This parameter is used only when C(state) is I(running) or I(present) and is able to only attach disks.
+ To manage disks of the VM in more depth please use M(ovirt_disks) module instead."
+ sysprep:
+ description:
+ - "Dictionary with values for Windows Virtual Machine initialization using sysprep:"
+ - "C(host_name) - Hostname to be set to Virtual Machine when deployed."
+ - "C(active_directory_ou) - Active Directory Organizational Unit, to be used for login of user."
+ - "C(org_name) - Organization name to be set to Windows Virtual Machine."
+ - "C(domain) - Domain to be set to Windows Virtual Machine."
+ - "C(timezone) - Timezone to be set to Windows Virtual Machine."
+ - "C(ui_language) - UI language of the Windows Virtual Machine."
+ - "C(system_locale) - System localization of the Windows Virtual Machine."
+ - "C(input_locale) - Input localization of the Windows Virtual Machine."
+ - "C(windows_license_key) - License key to be set to Windows Virtual Machine."
+ - "C(user_name) - Username to be used for set password to Windows Virtual Machine."
+ - "C(root_password) - Password to be set for username to Windows Virtual Machine."
+ cloud_init:
+ description:
+ - "Dictionary with values for Unix-like Virtual Machine initialization using cloud init:"
+ - "C(host_name) - Hostname to be set to Virtual Machine when deployed."
+ - "C(timezone) - Timezone to be set to Virtual Machine when deployed."
+ - "C(user_name) - Username to be used to set password to Virtual Machine when deployed."
+ - "C(root_password) - Password to be set for user specified by C(user_name) parameter."
+ - "C(authorized_ssh_keys) - Use this SSH keys to login to Virtual Machine."
+ - "C(regenerate_ssh_keys) - If I(True) SSH keys will be regenerated on Virtual Machine."
+ - "C(custom_script) - Cloud-init script which will be executed on Virtual Machine when deployed."
+ - "C(dns_servers) - DNS servers to be configured on Virtual Machine."
+ - "C(dns_search) - DNS search domains to be configured on Virtual Machine."
+ - "C(nic_boot_protocol) - Set boot protocol of the network interface of Virtual Machine. Can be one of none, dhcp or static."
+ - "C(nic_ip_address) - If boot protocol is static, set this IP address to network interface of Virtual Machine."
+ - "C(nic_netmask) - If boot protocol is static, set this netmask to network interface of Virtual Machine."
+ - "C(nic_gateway) - If boot protocol is static, set this gateway to network interface of Virtual Machine."
+ - "C(nic_name) - Set name to network interface of Virtual Machine."
+ - "C(nic_on_boot) - If I(True) network interface will be set to start on boot."
+ cloud_init_nics:
+ description:
+ - "List of dictionaries representing network interfaces to be setup by cloud init."
+ - "This option is used, when user needs to setup more network interfaces via cloud init."
+ - "If one network interface is enough, user should use C(cloud_init) I(nic_*) parameters. C(cloud_init) I(nic_*) parameters
+ are merged with C(cloud_init_nics) parameters."
+ - "Dictionary can contain following values:"
+ - "C(nic_boot_protocol) - Set boot protocol of the network interface of Virtual Machine. Can be one of none, dhcp or static."
+ - "C(nic_ip_address) - If boot protocol is static, set this IP address to network interface of Virtual Machine."
+ - "C(nic_netmask) - If boot protocol is static, set this netmask to network interface of Virtual Machine."
+ - "C(nic_gateway) - If boot protocol is static, set this gateway to network interface of Virtual Machine."
+ - "C(nic_name) - Set name to network interface of Virtual Machine."
+ - "C(nic_on_boot) - If I(True) network interface will be set to start on boot."
+ version_added: "2.3"
+notes:
+ - "If VM is in I(UNASSIGNED) or I(UNKNOWN) state before any operation, the module will fail.
+ If VM is in I(IMAGE_LOCKED) state before any operation, we try to wait for VM to be I(DOWN).
+ If VM is in I(SAVING_STATE) state before any operation, we try to wait for VM to be I(SUSPENDED).
+ If VM is in I(POWERING_DOWN) state before any operation, we try to wait for VM to be I(UP) or I(DOWN). VM can
+ get into I(UP) state from I(POWERING_DOWN) state, when there is no ACPI or guest agent running inside VM, or
+ if the shutdown operation fails.
+ When user specify I(UP) C(state), we always wait to VM to be in I(UP) state in case VM is I(MIGRATING),
+ I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE), I(WAIT_FOR_LAUNCH). In other states we run start operation on VM.
+ When user specify I(stopped) C(state), and If user pass C(force) parameter set to I(true) we forcibly stop the VM in
+ any state. If user don't pass C(force) parameter, we always wait to VM to be in UP state in case VM is
+ I(MIGRATING), I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE), I(WAIT_FOR_LAUNCH). If VM is in I(PAUSED) or
+ I(SUSPENDED) state, we start the VM. Then we gracefully shutdown the VM.
+ When user specify I(suspended) C(state), we always wait to VM to be in UP state in case VM is I(MIGRATING),
+ I(REBOOTING), I(POWERING_UP), I(RESTORING_STATE), I(WAIT_FOR_LAUNCH). If VM is in I(PAUSED) or I(DOWN) state,
+ we start the VM. Then we suspend the VM.
+ When user specify I(absent) C(state), we forcibly stop the VM in any state and remove it."
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Creates a new Virtual Machine from template named 'rhel7_template'
+ovirt_vms:
+ state: present
+ name: myvm
+ template: rhel7_template
+
+# Creates a stateless VM which will always use latest template version:
+ovirt_vms:
+ name: myvm
+ template: rhel7
+ cluster: mycluster
+ use_latest_template_version: true
+
+# Creates a new server rhel7 Virtual Machine from Blank template
+# on brq01 cluster with 2GiB memory and 2 vcpu cores/sockets
+# and attach bootable disk with name rhel7_disk and attach virtio NIC
+ovirt_vms:
+ state: present
+ cluster: brq01
+ name: myvm
+ memory: 2GiB
+ cpu_cores: 2
+ cpu_sockets: 2
+ cpu_shares: 1024
+ type: server
+ operating_system: rhel_7x64
+ disks:
+ - name: rhel7_disk
+ bootable: True
+ nics:
+ - name: nic1
+
+# Run VM with cloud init:
+ovirt_vms:
+ name: rhel7
+ template: rhel7
+ cluster: Default
+ memory: 1GiB
+ high_availability: true
+ cloud_init:
+ nic_boot_protocol: static
+ nic_ip_address: 10.34.60.86
+ nic_netmask: 255.255.252.0
+ nic_gateway: 10.34.63.254
+ nic_name: eth1
+ nic_on_boot: true
+ host_name: example.com
+ custom_script: |
+ write_files:
+ - content: |
+ Hello, world!
+ path: /tmp/greeting.txt
+ permissions: '0644'
+ user_name: root
+ root_password: super_password
+
+# Run VM with cloud init, with multiple network interfaces:
+ovirt_vms:
+ name: rhel7_4
+ template: rhel7
+ cluster: mycluster
+ cloud_init_nics:
+ - nic_name: eth0
+ nic_boot_protocol: dhcp
+ nic_on_boot: true
+ - nic_name: eth1
+ nic_boot_protocol: static
+ nic_ip_address: 10.34.60.86
+ nic_netmask: 255.255.252.0
+ nic_gateway: 10.34.63.254
+ nic_on_boot: true
+
+# Run VM with sysprep:
+ovirt_vms:
+ name: windows2012R2_AD
+ template: windows2012R2
+ cluster: Default
+ memory: 3GiB
+ high_availability: true
+ sysprep:
+ host_name: windowsad.example.com
+ user_name: Administrator
+ root_password: SuperPassword123
+
+# Migrate/Run VM to/on host named 'host1'
+ovirt_vms:
+ state: running
+ name: myvm
+ host: host1
+
+# Change Vm's CD:
+ovirt_vms:
+ name: myvm
+ cd_iso: drivers.iso
+
+# Eject Vm's CD:
+ovirt_vms:
+ name: myvm
+ cd_iso: ''
+
+# Boot VM from CD:
+ovirt_vms:
+ name: myvm
+ cd_iso: centos7_x64.iso
+ boot_devices:
+ - cdrom
+
+# Stop vm:
+ovirt_vms:
+ state: stopped
+ name: myvm
+
+# Upgrade memory to already created VM:
+ovirt_vms:
+ name: myvm
+ memory: 4GiB
+
+# Hot plug memory to already created and running VM:
+# (VM won't be restarted)
+ovirt_vms:
+ name: myvm
+ memory: 4GiB
+
+# When change on the VM needs restart of the VM, use next_run state,
+# The VM will be updated and rebooted if there are any changes.
+# If present state would be used, VM won't be restarted.
+ovirt_vms:
+ state: next_run
+ name: myvm
+ boot_devices:
+ - network
+
+# Remove VM, if VM is running it will be stopped:
+ovirt_vms:
+ state: absent
+ name: myvm
+'''
+
+
+RETURN = '''
+id:
+ description: ID of the VM which is managed
+ returned: On success if VM is found.
+ type: str
+ sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
+vm:
+ description: "Dictionary of all the VM attributes. VM attributes can be found on your oVirt instance
+ at following url: https://ovirt.example.com/ovirt-engine/api/model#types/vm."
+ returned: On success if VM is found.
+'''
+
+
class VmsModule(BaseModule):
    """VM-specific glue for the generic BaseModule create/update/action flow.

    Overrides build_entity/update_check to map module parameters onto the
    oVirt SDK Vm type, and supplies pre/post hooks used by main() for the
    start/stop/suspend/remove transitions.
    """

    def __get_template_with_version(self):
        """
        oVirt in version 4.1 doesn't support search by template+version_number,
        so we need to list all templates with a specific name and then iterate
        through its versions until we find the version we look for.

        Returns the matching template object, or None when no template
        parameter was given or nothing matched.
        """
        template = None
        if self._module.params['template']:
            templates_service = self._connection.system_service().templates_service()
            templates = templates_service.list(search='name=%s' % self._module.params['template'])
            if self._module.params['template_version']:
                # Narrow the candidates to the exact requested version number:
                templates = [
                    t for t in templates
                    if t.version.version_number == self._module.params['template_version']
                ]
            if templates:
                template = templates[0]

        return template

    def build_entity(self):
        """Translate the module parameters into an otypes.Vm.

        Parameters the user did not set are passed as None so the engine's
        own defaults apply.
        """
        template = self.__get_template_with_version()
        return otypes.Vm(
            name=self._module.params['name'],
            cluster=otypes.Cluster(
                name=self._module.params['cluster']
            ) if self._module.params['cluster'] else None,
            template=otypes.Template(
                id=template.id,
            ) if template else None,
            use_latest_template_version=self._module.params['use_latest_template_version'],
            # Using the latest template version implies a stateless VM
            # (see the use_latest_template_version documentation):
            stateless=self._module.params['stateless'] or self._module.params['use_latest_template_version'],
            delete_protected=self._module.params['delete_protected'],
            high_availability=otypes.HighAvailability(
                enabled=self._module.params['high_availability']
            ) if self._module.params['high_availability'] is not None else None,
            cpu=otypes.Cpu(
                topology=otypes.CpuTopology(
                    cores=self._module.params['cpu_cores'],
                    sockets=self._module.params['cpu_sockets'],
                )
            ) if (
                self._module.params['cpu_cores'] or self._module.params['cpu_sockets']
            ) else None,
            cpu_shares=self._module.params['cpu_shares'],
            os=otypes.OperatingSystem(
                type=self._module.params['operating_system'],
                boot=otypes.Boot(
                    devices=[
                        otypes.BootDevice(dev) for dev in self._module.params['boot_devices']
                    ],
                ) if self._module.params['boot_devices'] else None,
            ) if (
                self._module.params['operating_system'] or self._module.params['boot_devices']
            ) else None,
            type=otypes.VmType(
                self._module.params['type']
            ) if self._module.params['type'] else None,
            memory=convert_to_bytes(
                self._module.params['memory']
            ) if self._module.params['memory'] else None,
            memory_policy=otypes.MemoryPolicy(
                guaranteed=convert_to_bytes(self._module.params['memory_guaranteed']),
            ) if self._module.params['memory_guaranteed'] else None,
        )

    def update_check(self, entity):
        """Return True when the existing VM already matches the requested
        parameters, i.e. no update call is necessary. equal() treats an
        unset (None) parameter as 'matches anything'."""
        return (
            equal(self._module.params.get('cluster'), get_link_name(self._connection, entity.cluster)) and
            equal(convert_to_bytes(self._module.params['memory']), entity.memory) and
            equal(convert_to_bytes(self._module.params['memory_guaranteed']), entity.memory_policy.guaranteed) and
            equal(self._module.params.get('cpu_cores'), entity.cpu.topology.cores) and
            equal(self._module.params.get('cpu_sockets'), entity.cpu.topology.sockets) and
            equal(self._module.params.get('type'), str(entity.type)) and
            equal(self._module.params.get('operating_system'), str(entity.os.type)) and
            equal(self._module.params.get('high_availability'), entity.high_availability.enabled) and
            equal(self._module.params.get('stateless'), entity.stateless) and
            equal(self._module.params.get('cpu_shares'), entity.cpu_shares) and
            equal(self._module.params.get('delete_protected'), entity.delete_protected) and
            equal(self._module.params.get('use_latest_template_version'), entity.use_latest_template_version) and
            equal(self._module.params.get('boot_devices'), [str(dev) for dev in getattr(entity.os, 'devices', [])])
        )

    def pre_create(self, entity):
        # If VM don't exists, and template is not specified, set it to Blank:
        if entity is None:
            if self._module.params.get('template') is None:
                self._module.params['template'] = 'Blank'

    def post_update(self, entity):
        # Updates may add new disks/NICs just like creation does:
        self.post_create(entity)

    def post_create(self, entity):
        # After creation of the VM, attach disks and NICs:
        # NOTE(review): __attach_disks/__attach_nics set self.changed
        # internally but return None, so these assignments overwrite
        # self.changed with None afterwards — looks unintended; confirm
        # against BaseModule's handling of self.changed.
        self.changed = self.__attach_disks(entity)
        self.changed = self.__attach_nics(entity)

    def pre_remove(self, entity):
        # Forcibly stop the VM, if it's not in DOWN state:
        if entity.status != otypes.VmStatus.DOWN:
            if not self._module.check_mode:
                self.changed = self.action(
                    action='stop',
                    action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
                    wait_condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
                )['changed']

    def __suspend_shutdown_common(self, vm_service):
        # If the VM is on its way up (or already up), wait for a stable UP
        # state before issuing a suspend/shutdown:
        if vm_service.get().status in [
            otypes.VmStatus.MIGRATING,
            otypes.VmStatus.POWERING_UP,
            otypes.VmStatus.REBOOT_IN_PROGRESS,
            otypes.VmStatus.WAIT_FOR_LAUNCH,
            otypes.VmStatus.UP,
            otypes.VmStatus.RESTORING_STATE,
        ]:
            self._wait_for_UP(vm_service)

    def _pre_shutdown_action(self, entity):
        # A SUSPENDED/PAUSED VM cannot be gracefully shut down directly;
        # start it first and wait for UP (see module notes):
        vm_service = self._service.vm_service(entity.id)
        self.__suspend_shutdown_common(vm_service)
        if entity.status in [otypes.VmStatus.SUSPENDED, otypes.VmStatus.PAUSED]:
            vm_service.start()
            self._wait_for_UP(vm_service)
        return vm_service.get()

    def _pre_suspend_action(self, entity):
        # A PAUSED/DOWN VM cannot be suspended directly; start it first
        # and wait for UP (see module notes):
        vm_service = self._service.vm_service(entity.id)
        self.__suspend_shutdown_common(vm_service)
        if entity.status in [otypes.VmStatus.PAUSED, otypes.VmStatus.DOWN]:
            vm_service.start()
            self._wait_for_UP(vm_service)
        return vm_service.get()

    def _post_start_action(self, entity):
        # NOTE(review): uses self._service.service(entity.id) while sibling
        # methods use self._service.vm_service(entity.id) — confirm both
        # resolve to the same per-VM service in the SDK.
        vm_service = self._service.service(entity.id)
        self._wait_for_UP(vm_service)
        self._attach_cd(vm_service.get())
        self._migrate_vm(vm_service.get())

    def _attach_cd(self, entity):
        """Attach/eject the CD given by the cd_iso parameter; returns entity.

        When the VM is UP the change is applied to the current (runtime)
        CD-ROM device, otherwise persistently.
        """
        cd_iso = self._module.params['cd_iso']
        if cd_iso is not None:
            vm_service = self._service.service(entity.id)
            current = vm_service.get().status == otypes.VmStatus.UP
            cdroms_service = vm_service.cdroms_service()
            cdrom_device = cdroms_service.list()[0]
            cdrom_service = cdroms_service.cdrom_service(cdrom_device.id)
            cdrom = cdrom_service.get(current=current)
            # Empty-string cd_iso means "eject"; only update on a real change:
            if getattr(cdrom.file, 'id', '') != cd_iso:
                if not self._module.check_mode:
                    cdrom_service.update(
                        cdrom=otypes.Cdrom(
                            file=otypes.File(id=cd_iso)
                        ),
                        current=current,
                    )
                self.changed = True

        return entity

    def _migrate_vm(self, entity):
        """Migrate the VM to the host named by the host parameter, if it is
        UP and currently running elsewhere; returns entity."""
        vm_host = self._module.params['host']
        vm_service = self._service.vm_service(entity.id)
        if vm_host is not None:
            # In case VM is preparing to be UP, wait to be up, to migrate it:
            if entity.status == otypes.VmStatus.UP:
                hosts_service = self._connection.system_service().hosts_service()
                current_vm_host = hosts_service.host_service(entity.host.id).get().name
                if vm_host != current_vm_host:
                    if not self._module.check_mode:
                        vm_service.migrate(host=otypes.Host(name=vm_host))
                        self._wait_for_UP(vm_service)
                    self.changed = True

        return entity

    def _wait_for_UP(self, vm_service):
        # Poll until the VM reports UP, honouring the module's wait/timeout:
        wait(
            service=vm_service,
            condition=lambda vm: vm.status == otypes.VmStatus.UP,
            wait=self._module.params['wait'],
            timeout=self._module.params['timeout'],
        )

    def __attach_disks(self, entity):
        """Attach every disk listed in the disks parameter that is not yet
        attached to the VM. Sets self.changed; returns None."""
        disks_service = self._connection.system_service().disks_service()

        for disk in self._module.params['disks']:
            # If disk ID is not specified, find disk by name:
            disk_id = disk.get('id')
            if disk_id is None:
                disk_id = getattr(
                    search_by_name(
                        service=disks_service,
                        name=disk.get('name')
                    ),
                    'id',
                    None
                )

            # Attach disk to VM:
            disk_attachments_service = self._service.service(entity.id).disk_attachments_service()
            if disk_attachments_service.attachment_service(disk_id).get() is None:
                if not self._module.check_mode:
                    disk_attachments_service.add(
                        otypes.DiskAttachment(
                            disk=otypes.Disk(
                                id=disk_id,
                            ),
                            active=disk.get('activate', True),
                            interface=otypes.DiskInterface(
                                disk.get('interface', 'virtio')
                            ),
                            bootable=disk.get('bootable', False),
                        )
                    )
                self.changed = True

    def __attach_nics(self, entity):
        # Attach NICs to VM, if specified:
        # Only creates missing NICs (matched by name); existing NICs are
        # left untouched — see the module documentation.
        vnic_profiles_service = self._connection.system_service().vnic_profiles_service()
        nics_service = self._service.service(entity.id).nics_service()
        for nic in self._module.params['nics']:
            if search_by_name(nics_service, nic.get('name')) is None:
                if not self._module.check_mode:
                    nics_service.add(
                        otypes.Nic(
                            name=nic.get('name'),
                            interface=otypes.NicInterface(
                                nic.get('interface', 'virtio')
                            ),
                            vnic_profile=otypes.VnicProfile(
                                id=search_by_name(
                                    vnic_profiles_service,
                                    nic.get('profile_name'),
                                ).id
                            ) if nic.get('profile_name') else None,
                            mac=otypes.Mac(
                                address=nic.get('mac_address')
                            ) if nic.get('mac_address') else None,
                        )
                    )
                self.changed = True
+
+
def _get_initialization(sysprep, cloud_init, cloud_init_nics):
    """
    Build an otypes.Initialization from the module parameters, or None when
    neither cloud-init nor sysprep data was supplied. Cloud-init data takes
    precedence over sysprep.

    Note: this deliberately pops the nic_* keys out of each nic dictionary
    (including the cloud_init dict itself, which main() appends to
    cloud_init_nics) so that the remaining cloud_init keys can be forwarded
    verbatim as Initialization keyword arguments.
    """
    initialization = None
    # main() appends cloud_init even when it is None, and the parameter
    # itself may be None — drop empty entries so nic.get() never explodes:
    cloud_init_nics = [nic for nic in (cloud_init_nics or []) if nic]
    if cloud_init or cloud_init_nics:
        initialization = otypes.Initialization(
            nic_configurations=[
                otypes.NicConfiguration(
                    boot_protocol=otypes.BootProtocol(
                        nic.pop('nic_boot_protocol').lower()
                    ) if nic.get('nic_boot_protocol') else None,
                    name=nic.pop('nic_name', None),
                    on_boot=nic.pop('nic_on_boot', None),
                    ip=otypes.Ip(
                        address=nic.pop('nic_ip_address', None),
                        netmask=nic.pop('nic_netmask', None),
                        gateway=nic.pop('nic_gateway', None),
                    ) if (
                        nic.get('nic_gateway') is not None or
                        nic.get('nic_netmask') is not None or
                        nic.get('nic_ip_address') is not None
                    ) else None,
                )
                for nic in cloud_init_nics
                if (
                    nic.get('nic_gateway') is not None or
                    nic.get('nic_netmask') is not None or
                    nic.get('nic_ip_address') is not None or
                    nic.get('nic_boot_protocol') is not None or
                    nic.get('nic_on_boot') is not None
                )
            ] if cloud_init_nics else None,
            # cloud_init is legitimately None when only cloud_init_nics was
            # given; the original '**cloud_init' raised TypeError then:
            **(cloud_init or {})
        )
    elif sysprep:
        initialization = otypes.Initialization(
            **sysprep
        )
    return initialization
+
+
def control_state(vm, vms_service, module):
    """
    Bring a pre-existing VM out of transient or invalid states so the
    requested state transition can safely begin (see the module notes).

    :param vm: current otypes.Vm, or None when the VM doesn't exist yet
    :param vms_service: VmsService used to obtain the per-VM service
    :param module: AnsibleModule, source of params and failure reporting
    """
    if vm is None:
        return

    force = module.params['force']
    state = module.params['state']

    vm_service = vms_service.vm_service(vm.id)
    if vm.status == otypes.VmStatus.IMAGE_LOCKED:
        # Disks are still being prepared; wait until the VM settles DOWN:
        wait(
            service=vm_service,
            condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
        )
    elif vm.status == otypes.VmStatus.SAVING_STATE:
        # Result state is SUSPENDED, we should wait to be suspended:
        wait(
            service=vm_service,
            condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
        )
    elif (
        vm.status == otypes.VmStatus.UNASSIGNED or
        vm.status == otypes.VmStatus.UNKNOWN
    ):
        # Invalid states:
        # BUGFIX: fail_json accepts keyword arguments only; the original
        # positional call raised TypeError instead of failing cleanly.
        module.fail_json(
            msg="Not possible to control VM, if it's in '{}' status".format(vm.status)
        )
    elif vm.status == otypes.VmStatus.POWERING_DOWN:
        if (force and state == 'stopped') or state == 'absent':
            vm_service.stop()
            wait(
                service=vm_service,
                condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
            )
        else:
            # If VM is powering down, wait to be DOWN or UP.
            # VM can end in UP state in case there is no GA
            # or ACPI on the VM or shutdown operation crashed:
            wait(
                service=vm_service,
                condition=lambda vm: vm.status in [otypes.VmStatus.DOWN, otypes.VmStatus.UP],
            )
+
+
def main():
    """Entry point of the ovirt_vms module: parse parameters, reconcile the
    VM with the requested state and exit with id/vm facts."""
    argument_spec = ovirt_full_argument_spec(
        state=dict(
            choices=['running', 'stopped', 'present', 'absent', 'suspended', 'next_run'],
            default='present',
        ),
        name=dict(default=None),
        id=dict(default=None),
        cluster=dict(default=None),
        template=dict(default=None),
        template_version=dict(default=None, type='int'),
        use_latest_template_version=dict(default=None, type='bool'),
        disks=dict(default=[], type='list'),
        memory=dict(default=None),
        memory_guaranteed=dict(default=None),
        cpu_sockets=dict(default=None, type='int'),
        cpu_cores=dict(default=None, type='int'),
        cpu_shares=dict(default=None, type='int'),
        type=dict(choices=['server', 'desktop']),
        operating_system=dict(
            default=None,
            choices=[
                'rhel_6_ppc64', 'other', 'freebsd', 'windows_2003x64', 'windows_10',
                'rhel_6x64', 'rhel_4x64', 'windows_2008x64', 'windows_2008R2x64',
                'debian_7', 'windows_2012x64', 'ubuntu_14_04', 'ubuntu_12_04',
                'ubuntu_13_10', 'windows_8x64', 'other_linux_ppc64', 'windows_2003',
                'other_linux', 'windows_10x64', 'windows_2008', 'rhel_3', 'rhel_5',
                'rhel_4', 'other_ppc64', 'sles_11', 'rhel_6', 'windows_xp', 'rhel_7x64',
                'freebsdx64', 'rhel_7_ppc64', 'windows_7', 'rhel_5x64',
                'ubuntu_14_04_ppc64', 'sles_11_ppc64', 'windows_8',
                'windows_2012R2x64', 'windows_2008r2x64', 'ubuntu_13_04',
                'ubuntu_12_10', 'windows_7x64',
            ],
        ),
        cd_iso=dict(default=None),
        boot_devices=dict(default=None, type='list'),
        high_availability=dict(type='bool'),
        stateless=dict(type='bool'),
        delete_protected=dict(type='bool'),
        force=dict(type='bool', default=False),
        nics=dict(default=[], type='list'),
        cloud_init=dict(type='dict'),
        # BUGFIX: key was misspelled 'defaul', so the default was never
        # applied and the later .append() crashed on None:
        cloud_init_nics=dict(default=[], type='list'),
        sysprep=dict(type='dict'),
        host=dict(default=None),
        clone=dict(type='bool', default=False),
        clone_permissions=dict(type='bool', default=False),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    check_sdk(module)
    check_params(module)

    connection = None
    try:
        state = module.params['state']
        connection = create_connection(module.params.pop('auth'))
        vms_service = connection.system_service().vms_service()
        vms_module = VmsModule(
            connection=connection,
            module=module,
            service=vms_service,
        )
        vm = vms_module.search_entity()

        control_state(vm, vms_service, module)
        if state in ('present', 'running', 'next_run'):
            sysprep = module.params['sysprep']
            cloud_init = module.params['cloud_init']
            # Merge the single-NIC cloud_init parameters into the
            # cloud_init_nics list; guard against None values so the merge
            # never crashes when only one of them is supplied:
            cloud_init_nics = module.params['cloud_init_nics'] or []
            if cloud_init is not None:
                cloud_init_nics.append(cloud_init)

            # In case VM don't exist, wait for VM DOWN state,
            # otherwise don't wait for any state, just update VM:
            vms_module.create(
                entity=vm,
                result_state=otypes.VmStatus.DOWN if vm is None else None,
                clone=module.params['clone'],
                clone_permissions=module.params['clone_permissions'],
            )
            ret = vms_module.action(
                action='start',
                post_action=vms_module._post_start_action,
                action_condition=lambda vm: (
                    vm.status not in [
                        otypes.VmStatus.MIGRATING,
                        otypes.VmStatus.POWERING_UP,
                        otypes.VmStatus.REBOOT_IN_PROGRESS,
                        otypes.VmStatus.WAIT_FOR_LAUNCH,
                        otypes.VmStatus.UP,
                        otypes.VmStatus.RESTORING_STATE,
                    ]
                ),
                wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
                # Start action kwargs:
                use_cloud_init=cloud_init is not None or len(cloud_init_nics) > 0,
                use_sysprep=sysprep is not None,
                vm=otypes.Vm(
                    placement_policy=otypes.VmPlacementPolicy(
                        hosts=[otypes.Host(name=module.params['host'])]
                    ) if module.params['host'] else None,
                    initialization=_get_initialization(sysprep, cloud_init, cloud_init_nics),
                ),
            )

            if state == 'next_run':
                # Apply next run configuration, if needed:
                vm = vms_service.vm_service(ret['id']).get()
                if vm.next_run_configuration_exists:
                    ret = vms_module.action(
                        action='reboot',
                        entity=vm,
                        action_condition=lambda vm: vm.status == otypes.VmStatus.UP,
                        wait_condition=lambda vm: vm.status == otypes.VmStatus.UP,
                    )
        elif state == 'stopped':
            vms_module.create(
                result_state=otypes.VmStatus.DOWN if vm is None else None,
                clone=module.params['clone'],
                clone_permissions=module.params['clone_permissions'],
            )
            if module.params['force']:
                # Forced stop: power off immediately:
                ret = vms_module.action(
                    action='stop',
                    post_action=vms_module._attach_cd,
                    action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
                    wait_condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
                )
            else:
                # Graceful shutdown via ACPI/guest agent:
                ret = vms_module.action(
                    action='shutdown',
                    pre_action=vms_module._pre_shutdown_action,
                    post_action=vms_module._attach_cd,
                    action_condition=lambda vm: vm.status != otypes.VmStatus.DOWN,
                    wait_condition=lambda vm: vm.status == otypes.VmStatus.DOWN,
                )
        elif state == 'suspended':
            vms_module.create(
                result_state=otypes.VmStatus.DOWN if vm is None else None,
                clone=module.params['clone'],
                clone_permissions=module.params['clone_permissions'],
            )
            ret = vms_module.action(
                action='suspend',
                pre_action=vms_module._pre_suspend_action,
                action_condition=lambda vm: vm.status != otypes.VmStatus.SUSPENDED,
                wait_condition=lambda vm: vm.status == otypes.VmStatus.SUSPENDED,
            )
        elif state == 'absent':
            ret = vms_module.remove()

        module.exit_json(**ret)
    except Exception as e:
        module.fail_json(msg=str(e))
    finally:
        # Guard against a failed create_connection(), which would otherwise
        # raise here and mask the original error:
        if connection is not None:
            connection.close(logout=False)

from ansible.module_utils.basic import *
if __name__ == "__main__":
    main()
diff --git a/lib/ansible/modules/cloud/ovirt/ovirt_vms_facts.py b/lib/ansible/modules/cloud/ovirt/ovirt_vms_facts.py
new file mode 100644
index 0000000000..2a11ad7528
--- /dev/null
+++ b/lib/ansible/modules/cloud/ovirt/ovirt_vms_facts.py
@@ -0,0 +1,104 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2016 Red Hat, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.ovirt import (
+ check_sdk,
+ create_connection,
+ get_dict_of_struct,
+ ovirt_full_argument_spec,
+)
+
+
+ANSIBLE_METADATA = {'status': 'preview',
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: ovirt_vms_facts
+short_description: Retrieve facts about one or more oVirt virtual machines
+author: "Ondra Machacek (@machacekondra)"
+version_added: "2.3"
+description:
+ - "Retrieve facts about one or more oVirt virtual machines."
+notes:
+ - "This module creates a new top-level C(ovirt_vms) fact, which
+ contains a list of virtual machines."
+options:
+ pattern:
+ description:
+ - "Search term which is accepted by oVirt search backend."
+ - "For example to search VM X from cluster Y use following pattern:
+ name=X and cluster=Y"
+extends_documentation_fragment: ovirt
+'''
+
+EXAMPLES = '''
+# Examples don't contain auth parameter for simplicity,
+# look at ovirt_auth module to see how to reuse authentication:
+
+# Gather facts about all VMs which names start with C(centos) and
+# belong to cluster C(west):
+- ovirt_vms_facts:
+ pattern: name=centos* and cluster=west
+- debug:
+ var: ovirt_vms
+'''
+
+RETURN = '''
+ovirt_vms:
+  description: "List of dictionaries describing the VMs. VM attributes are mapped to dictionary keys,
+ all VMs attributes can be found at following url: https://ovirt.example.com/ovirt-engine/api/model#types/vm."
+ returned: On success.
+ type: list
+'''
+
+
+def main():
+ argument_spec = ovirt_full_argument_spec(
+ pattern=dict(default='', required=False),
+ )
+ module = AnsibleModule(argument_spec)
+ check_sdk(module)
+
+ try:
+ connection = create_connection(module.params.pop('auth'))
+ vms_service = connection.system_service().vms_service()
+ vms = vms_service.list(search=module.params['pattern'])
+ module.exit_json(
+ changed=False,
+ ansible_facts=dict(
+ ovirt_vms=[
+ get_dict_of_struct(c) for c in vms
+ ],
+ ),
+ )
+ except Exception as e:
+ module.fail_json(msg=str(e), exception=traceback.format_exc())
+ finally:
+ connection.close(logout=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/profitbricks/__init__.py b/lib/ansible/modules/cloud/profitbricks/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/cloud/profitbricks/__init__.py
diff --git a/lib/ansible/modules/cloud/profitbricks/profitbricks.py b/lib/ansible/modules/cloud/profitbricks/profitbricks.py
new file mode 100644
index 0000000000..cfafc8e0a4
--- /dev/null
+++ b/lib/ansible/modules/cloud/profitbricks/profitbricks.py
@@ -0,0 +1,674 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: profitbricks
+short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine.
+description:
+ - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it can optionally wait for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0
+version_added: "2.0"
+options:
+ auto_increment:
+ description:
+ - Whether or not to increment a single number in the name for created virtual machines.
+ default: yes
+ choices: ["yes", "no"]
+ name:
+ description:
+ - The name of the virtual machine.
+ required: true
+ image:
+ description:
+ - The system image ID for creating the virtual machine, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8.
+ required: true
+ image_password:
+ description:
+ - Password set for the administrative user.
+ required: false
+ version_added: '2.2'
+ ssh_keys:
+ description:
+ - Public SSH keys allowing access to the virtual machine.
+ required: false
+ version_added: '2.2'
+ datacenter:
+ description:
+ - The datacenter to provision this virtual machine.
+ required: false
+ default: null
+ cores:
+ description:
+ - The number of CPU cores to allocate to the virtual machine.
+ required: false
+ default: 2
+ ram:
+ description:
+ - The amount of memory to allocate to the virtual machine.
+ required: false
+ default: 2048
+ cpu_family:
+ description:
+ - The CPU family type to allocate to the virtual machine.
+ required: false
+ default: AMD_OPTERON
+ choices: [ "AMD_OPTERON", "INTEL_XEON" ]
+ version_added: '2.2'
+ volume_size:
+ description:
+ - The size in GB of the boot volume.
+ required: false
+ default: 10
+ bus:
+ description:
+ - The bus type for the volume.
+ required: false
+ default: VIRTIO
+ choices: [ "IDE", "VIRTIO"]
+ instance_ids:
+ description:
+ - list of instance ids, currently only used when state='absent' to remove instances.
+ required: false
+ count:
+ description:
+ - The number of virtual machines to create.
+ required: false
+ default: 1
+ location:
+ description:
+ - The datacenter location. Use only if you want to create the Datacenter or else this value is ignored.
+ required: false
+ default: us/las
+ choices: [ "us/las", "de/fra", "de/fkb" ]
+ assign_public_ip:
+ description:
+ - This will assign the machine to the public LAN. If no LAN exists with public Internet access it is created.
+ required: false
+ default: false
+ lan:
+ description:
+ - The ID of the LAN you wish to add the servers to.
+ required: false
+ default: 1
+ subscription_user:
+ description:
+      - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ required: false
+ default: null
+ subscription_password:
+ description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ required: false
+ default: null
+ wait:
+ description:
+ - wait for the instance to be in state 'running' before returning
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 600
+ remove_boot_volume:
+ description:
+ - remove the bootVolume of the virtual machine you're destroying.
+ required: false
+ default: "yes"
+ choices: ["yes", "no"]
+ state:
+ description:
+ - create or terminate instances
+ required: false
+ default: 'present'
+ choices: [ "running", "stopped", "absent", "present" ]
+
+requirements:
+ - "profitbricks"
+ - "python >= 2.6"
+author: Matt Baldwin (baldwin@stackpointcloud.com)
+'''
+
+EXAMPLES = '''
+
+# Note: These examples do not set authentication details, see the ProfitBricks Guide for details.
+
+# Provisioning example. This will create three servers and enumerate their names.
+
+- profitbricks:
+ datacenter: Tardis One
+ name: web%02d.stackpointcloud.com
+ cores: 4
+ ram: 2048
+ volume_size: 50
+ cpu_family: INTEL_XEON
+ image: a3eae284-a2fe-11e4-b187-5f1f641608c8
+ location: us/las
+ count: 3
+ assign_public_ip: true
+
+# Removing Virtual machines
+
+- profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: absent
+
+# Starting Virtual Machines.
+
+- profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: running
+
+# Stopping Virtual Machines
+
+- profitbricks:
+ datacenter: Tardis One
+ instance_ids:
+ - 'web001.stackpointcloud.com'
+ - 'web002.stackpointcloud.com'
+ - 'web003.stackpointcloud.com'
+ wait_timeout: 500
+ state: stopped
+
+'''
+
+import re
+import uuid
+import time
+
+HAS_PB_SDK = True
+
+try:
+ from profitbricks.client import ProfitBricksService, Volume, Server, Datacenter, NIC, LAN
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+
+LOCATIONS = ['us/las',
+ 'de/fra',
+ 'de/fkb']
+
+uuid_match = re.compile(
+ '[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise: return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request failed to complete ' + msg + ' "' + str(
+ promise['requestId']) + '" to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _create_machine(module, profitbricks, datacenter, name):
+ cores = module.params.get('cores')
+ ram = module.params.get('ram')
+ cpu_family = module.params.get('cpu_family')
+ volume_size = module.params.get('volume_size')
+ disk_type = module.params.get('disk_type')
+ image_password = module.params.get('image_password')
+ ssh_keys = module.params.get('ssh_keys')
+ bus = module.params.get('bus')
+ lan = module.params.get('lan')
+ assign_public_ip = module.params.get('assign_public_ip')
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+ location = module.params.get('location')
+ image = module.params.get('image')
+ assign_public_ip = module.boolean(module.params.get('assign_public_ip'))
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ if assign_public_ip:
+ public_found = False
+
+ lans = profitbricks.list_lans(datacenter)
+ for lan in lans['items']:
+ if lan['properties']['public']:
+ public_found = True
+ lan = lan['id']
+
+ if not public_found:
+ i = LAN(
+ name='public',
+ public=True)
+
+ lan_response = profitbricks.create_lan(datacenter, i)
+ _wait_for_completion(profitbricks, lan_response,
+ wait_timeout, "_create_machine")
+ lan = lan_response['id']
+
+ v = Volume(
+ name=str(uuid.uuid4()).replace('-', '')[:10],
+ size=volume_size,
+ image=image,
+ image_password=image_password,
+ ssh_keys=ssh_keys,
+ disk_type=disk_type,
+ bus=bus)
+
+ n = NIC(
+ lan=int(lan)
+ )
+
+ s = Server(
+ name=name,
+ ram=ram,
+ cores=cores,
+ cpu_family=cpu_family,
+ create_volumes=[v],
+ nics=[n],
+ )
+
+ try:
+ create_server_response = profitbricks.create_server(
+ datacenter_id=datacenter, server=s)
+
+ _wait_for_completion(profitbricks, create_server_response,
+ wait_timeout, "create_virtual_machine")
+
+ server_response = profitbricks.get_server(
+ datacenter_id=datacenter,
+ server_id=create_server_response['id'],
+ depth=3
+ )
+ except Exception as e:
+ module.fail_json(msg="failed to create the new server: %s" % str(e))
+ else:
+ return server_response
+
+
+def _startstop_machine(module, profitbricks, datacenter_id, server_id):
+ state = module.params.get('state')
+
+ try:
+ if state == 'running':
+ profitbricks.start_server(datacenter_id, server_id)
+ else:
+ profitbricks.stop_server(datacenter_id, server_id)
+
+ return True
+ except Exception as e:
+ module.fail_json(msg="failed to start or stop the virtual machine %s at %s: %s" % (server_id, datacenter_id, str(e)))
+
+
+def _create_datacenter(module, profitbricks):
+ datacenter = module.params.get('datacenter')
+ location = module.params.get('location')
+ wait_timeout = module.params.get('wait_timeout')
+
+ i = Datacenter(
+ name=datacenter,
+ location=location
+ )
+
+ try:
+ datacenter_response = profitbricks.create_datacenter(datacenter=i)
+
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "_create_datacenter")
+
+ return datacenter_response
+ except Exception as e:
+ module.fail_json(msg="failed to create the new server(s): %s" % str(e))
+
+
+def create_virtual_machine(module, profitbricks):
+ """
+ Create new virtual machine
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object
+
+ Returns:
+ True if a new virtual machine was created, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ name = module.params.get('name')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+ lan = module.params.get('lan')
+ wait_timeout = module.params.get('wait_timeout')
+ failed = True
+ datacenter_found = False
+
+ virtual_machines = []
+ virtual_machine_ids = []
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if datacenter_id:
+ datacenter_found = True
+
+ if not datacenter_found:
+ datacenter_response = _create_datacenter(module, profitbricks)
+ datacenter_id = datacenter_response['id']
+
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "create_virtual_machine")
+
+ if auto_increment:
+ numbers = set()
+ count_offset = 1
+
+ try:
+ name % 0
+ except TypeError:
+ e = get_exception()
+ if e.message.startswith('not all'):
+ name = '%s%%d' % name
+ else:
+ module.fail_json(msg=e.message)
+
+ number_range = xrange(count_offset, count_offset + count + len(numbers))
+ available_numbers = list(set(number_range).difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ names = [name]
+
+ # Prefetch a list of servers for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for name in names:
+ # Skip server creation if the server already exists.
+ if _get_server_id(server_list, name):
+ continue
+
+ create_response = _create_machine(module, profitbricks, str(datacenter_id), name)
+ nics = profitbricks.list_nics(datacenter_id, create_response['id'])
+ for n in nics['items']:
+ if lan == n['properties']['lan']:
+ create_response.update({'public_ip': n['properties']['ips'][0]})
+
+ virtual_machines.append(create_response)
+
+ failed = False
+
+ results = {
+ 'failed': failed,
+ 'machines': virtual_machines,
+ 'action': 'create',
+ 'instance_ids': {
+ 'instances': [i['id'] for i in virtual_machines],
+ }
+ }
+
+ return results
+
+
+def remove_virtual_machine(module, profitbricks):
+ """
+ Removes a virtual machine.
+
+ This will remove the virtual machine along with the bootVolume.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Not yet supported: handle deletion of attached data disks.
+
+ Returns:
+ True if a new virtual server was deleted, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ instance_ids = module.params.get('instance_ids')
+ remove_boot_volume = module.params.get('remove_boot_volume')
+ changed = False
+
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if not datacenter_id:
+ module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
+
+ # Prefetch server list for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for instance in instance_ids:
+ # Locate UUID for server if referenced by name.
+ server_id = _get_server_id(server_list, instance)
+ if server_id:
+ # Remove the server's boot volume
+ if remove_boot_volume:
+ _remove_boot_volume(module, profitbricks, datacenter_id, server_id)
+
+ # Remove the server
+ try:
+ server_response = profitbricks.delete_server(datacenter_id, server_id)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg="failed to terminate the virtual server: %s" % str(e))
+ else:
+ changed = True
+
+ return changed
+
+
+def _remove_boot_volume(module, profitbricks, datacenter_id, server_id):
+ """
+ Remove the boot volume from the server
+ """
+ try:
+ server = profitbricks.get_server(datacenter_id, server_id)
+ volume_id = server['properties']['bootVolume']['id']
+ volume_response = profitbricks.delete_volume(datacenter_id, volume_id)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg="failed to remove the server's boot volume: %s" % str(e))
+
+
+def startstop_machine(module, profitbricks, state):
+ """
+ Starts or Stops a virtual machine.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True when the servers process the action successfully, false otherwise.
+ """
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ changed = False
+
+ datacenter = module.params.get('datacenter')
+ instance_ids = module.params.get('instance_ids')
+
+ # Locate UUID for datacenter if referenced by name.
+ datacenter_list = profitbricks.list_datacenters()
+ datacenter_id = _get_datacenter_id(datacenter_list, datacenter)
+ if not datacenter_id:
+ module.fail_json(msg='Virtual data center \'%s\' not found.' % str(datacenter))
+
+ # Prefetch server list for later comparison.
+ server_list = profitbricks.list_servers(datacenter_id)
+ for instance in instance_ids:
+ # Locate UUID of server if referenced by name.
+ server_id = _get_server_id(server_list, instance)
+ if server_id:
+ _startstop_machine(module, profitbricks, datacenter_id, server_id)
+ changed = True
+
+ if wait:
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ matched_instances = []
+ for res in profitbricks.list_servers(datacenter_id)['items']:
+ if state == 'running':
+ if res['properties']['vmState'].lower() == state:
+ matched_instances.append(res)
+ elif state == 'stopped':
+ if res['properties']['vmState'].lower() == 'shutoff':
+ matched_instances.append(res)
+
+ if len(matched_instances) < len(instance_ids):
+ time.sleep(5)
+ else:
+ break
+
+ if wait_timeout <= time.time():
+ # waiting took too long
+ module.fail_json(msg="wait for virtual machine state timeout on %s" % time.asctime())
+
+ return (changed)
+
+
+def _get_datacenter_id(datacenters, identity):
+ """
+ Fetch and return datacenter UUID by datacenter name if found.
+ """
+ for datacenter in datacenters['items']:
+ if identity in (datacenter['properties']['name'], datacenter['id']):
+ return datacenter['id']
+ return None
+
+
+def _get_server_id(servers, identity):
+ """
+ Fetch and return server UUID by server name if found.
+ """
+ for server in servers['items']:
+ if identity in (server['properties']['name'], server['id']):
+ return server['id']
+ return None
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ name=dict(),
+ image=dict(),
+ cores=dict(type='int', default=2),
+ ram=dict(type='int', default=2048),
+ cpu_family=dict(choices=['AMD_OPTERON', 'INTEL_XEON'],
+ default='AMD_OPTERON'),
+ volume_size=dict(type='int', default=10),
+ disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
+ image_password=dict(default=None),
+ ssh_keys=dict(type='list', default=[]),
+ bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
+ lan=dict(type='int', default=1),
+ count=dict(type='int', default=1),
+ auto_increment=dict(type='bool', default=True),
+ instance_ids=dict(type='list', default=[]),
+ subscription_user=dict(),
+ subscription_password=dict(),
+ location=dict(choices=LOCATIONS, default='us/las'),
+ assign_public_ip=dict(type='bool', default=False),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ remove_boot_volume=dict(type='bool', default=True),
+ state=dict(default='present'),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required ' +
+ 'for running or stopping machines.')
+
+ try:
+ (changed) = remove_virtual_machine(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg='failed to set instance state: %s' % str(e))
+
+ elif state in ('running', 'stopped'):
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for ' +
+ 'running or stopping machines.')
+ try:
+ (changed) = startstop_machine(module, profitbricks, state)
+ module.exit_json(changed=changed)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg='failed to set instance state: %s' % str(e))
+
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for new instance')
+ if not module.params.get('image'):
+ module.fail_json(msg='image parameter is required for new instance')
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is ' +
+ 'required for new instance')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is ' +
+ 'required for new instance')
+
+ try:
+ (machine_dict_array) = create_virtual_machine(module, profitbricks)
+ module.exit_json(**machine_dict_array)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg='failed to set instance state: %s' % str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/profitbricks/profitbricks_datacenter.py b/lib/ansible/modules/cloud/profitbricks/profitbricks_datacenter.py
new file mode 100644
index 0000000000..b6ce237165
--- /dev/null
+++ b/lib/ansible/modules/cloud/profitbricks/profitbricks_datacenter.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: profitbricks_datacenter
+short_description: Create or destroy a ProfitBricks Virtual Datacenter.
+description:
+ - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency on profitbricks >= 1.0.0
+version_added: "2.0"
+options:
+ name:
+ description:
+ - The name of the virtual datacenter.
+ required: true
+ description:
+ description:
+ - The description of the virtual datacenter.
+ required: false
+ location:
+ description:
+ - The datacenter location.
+ required: false
+ default: us/las
+ choices: [ "us/las", "de/fra", "de/fkb" ]
+ subscription_user:
+ description:
+      - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
+ required: false
+ subscription_password:
+ description:
+      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
+ required: false
+ wait:
+ description:
+ - wait for the datacenter to be created before returning
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 600
+ state:
+ description:
+ - create or terminate datacenters
+ required: false
+ default: 'present'
+ choices: [ "present", "absent" ]
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (baldwin@stackpointcloud.com)
+'''
+
+EXAMPLES = '''
+
+# Create a Datacenter
+- profitbricks_datacenter:
+ datacenter: Tardis One
+ wait_timeout: 500
+
+# Destroy a Datacenter. This will remove all servers, volumes, and other objects in the datacenter.
+- profitbricks_datacenter:
+ datacenter: Tardis One
+ wait_timeout: 500
+ state: absent
+
+'''
+
+import re
+import uuid
+import time
+import sys
+
+HAS_PB_SDK = True
+
+try:
+ from profitbricks.client import ProfitBricksService, Datacenter
+except ImportError:
+ HAS_PB_SDK = False
+
+LOCATIONS = ['us/las',
+ 'de/fra',
+ 'de/fkb']
+
+uuid_match = re.compile(
+ '[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise: return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request failed to complete ' + msg + ' "' + str(
+ promise['requestId']) + '" to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+def _remove_datacenter(module, profitbricks, datacenter):
+ try:
+ profitbricks.delete_datacenter(datacenter)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the datacenter: %s" % str(e))
+
+def create_datacenter(module, profitbricks):
+ """
+ Creates a Datacenter
+
+ This will create a new Datacenter in the specified location.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if a new datacenter was created, false otherwise
+ """
+ name = module.params.get('name')
+ location = module.params.get('location')
+ description = module.params.get('description')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
+ virtual_datacenters = []
+
+
+ i = Datacenter(
+ name=name,
+ location=location,
+ description=description
+ )
+
+ try:
+ datacenter_response = profitbricks.create_datacenter(datacenter=i)
+
+ if wait:
+ _wait_for_completion(profitbricks, datacenter_response,
+ wait_timeout, "_create_datacenter")
+
+ results = {
+ 'datacenter_id': datacenter_response['id']
+ }
+
+ return results
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the new datacenter: %s" % str(e))
+
+def remove_datacenter(module, profitbricks):
+ """
+ Removes a Datacenter.
+
+ This will remove a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the datacenter was deleted, false otherwise
+ """
+ name = module.params.get('name')
+ changed = False
+
+ if(uuid_match.match(name)):
+ _remove_datacenter(module, profitbricks, name)
+ changed = True
+ else:
+ datacenters = profitbricks.list_datacenters()
+
+ for d in datacenters['items']:
+ vdc = profitbricks.get_datacenter(d['id'])
+
+ if name == vdc['properties']['name']:
+ name = d['id']
+ _remove_datacenter(module, profitbricks, name)
+ changed = True
+
+ return changed
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(),
+ description=dict(),
+ location=dict(choices=LOCATIONS, default='us/las'),
+ subscription_user=dict(),
+ subscription_password=dict(),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(default=600),
+ state=dict(default='present'),
+ )
+ )
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required deleting a virtual datacenter.')
+
+ try:
+ (changed) = remove_datacenter(module, profitbricks)
+ module.exit_json(
+ changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set datacenter state: %s' % str(e))
+
+ elif state == 'present':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for a new datacenter')
+ if not module.params.get('location'):
+ module.fail_json(msg='location parameter is required for a new datacenter')
+
+ try:
+ (datacenter_dict_array) = create_datacenter(module, profitbricks)
+ module.exit_json(**datacenter_dict_array)
+ except Exception as e:
+ module.fail_json(msg='failed to set datacenter state: %s' % str(e))
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/profitbricks/profitbricks_nic.py b/lib/ansible/modules/cloud/profitbricks/profitbricks_nic.py
new file mode 100644
index 0000000000..01377a338b
--- /dev/null
+++ b/lib/ansible/modules/cloud/profitbricks/profitbricks_nic.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: profitbricks_nic
+short_description: Create or Remove a NIC.
+description:
+ - This module allows you to create or restore a volume snapshot. This module has a dependency on profitbricks >= 1.0.0
+version_added: "2.0"
+options:
+ datacenter:
+ description:
+ - The datacenter in which to operate.
+ required: true
+ server:
+ description:
+ - The server name or ID.
+ required: true
+ name:
+ description:
+ - The name or ID of the NIC. This is only required on deletes, but not on create.
+ required: true
+ lan:
+ description:
+ - The LAN to place the NIC on. You can pass a LAN that doesn't exist and it will be created. Required on create.
+ required: true
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environement variable.
+ required: false
+ subscription_password:
+ description:
+ - THe ProfitBricks password. Overrides the PB_PASSWORD environement variable.
+ required: false
+ wait:
+ description:
+ - wait for the operation to complete before returning
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 600
+ state:
+ description:
+ - Indicate desired state of the resource
+ required: false
+ default: 'present'
+ choices: ["present", "absent"]
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (baldwin@stackpointcloud.com)
+'''
+
+EXAMPLES = '''
+
+# Create a NIC
+- profitbricks_nic:
+ datacenter: Tardis One
+ server: node002
+ lan: 2
+ wait_timeout: 500
+ state: present
+
+# Remove a NIC
+- profitbricks_nic:
+ datacenter: Tardis One
+ server: node002
+ name: 7341c2454f
+ wait_timeout: 500
+ state: absent
+
+'''
+
+import re
+import uuid
+import time
+
+HAS_PB_SDK = True
+
+try:
+ from profitbricks.client import ProfitBricksService, NIC
+except ImportError:
+ HAS_PB_SDK = False
+
+uuid_match = re.compile(
+ '[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise: return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request failed to complete ' + msg + ' "' + str(
+ promise['requestId']) + '" to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+def create_nic(module, profitbricks):
+ """
+ Creates a NIC.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the nic creates, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ lan = module.params.get('lan')
+ name = module.params.get('name')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+ try:
+ n = NIC(
+ name=name,
+ lan=lan
+ )
+
+ nic_response = profitbricks.create_nic(datacenter, server, n)
+
+ if wait:
+ _wait_for_completion(profitbricks, nic_response,
+ wait_timeout, "create_nic")
+
+ return nic_response
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the NIC: %s" % str(e))
+
+def delete_nic(module, profitbricks):
+ """
+ Removes a NIC
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the NIC was removed, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ name = module.params.get('name')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ server_found = False
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server_found = True
+ server = s['id']
+ break
+
+ if not server_found:
+ return False
+
+ # Locate UUID for NIC
+ nic_found = False
+ if not (uuid_match.match(name)):
+ nic_list = profitbricks.list_nics(datacenter, server)
+ for n in nic_list['items']:
+ if name == n['properties']['name']:
+ nic_found = True
+ name = n['id']
+ break
+
+ if not nic_found:
+ return False
+
+ try:
+ nic_response = profitbricks.delete_nic(datacenter, server, name)
+ return nic_response
+ except Exception as e:
+ module.fail_json(msg="failed to remove the NIC: %s" % str(e))
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ server=dict(),
+ name=dict(default=str(uuid.uuid4()).replace('-','')[:10]),
+ lan=dict(),
+ subscription_user=dict(),
+ subscription_password=dict(),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required')
+ if not module.params.get('server'):
+ module.fail_json(msg='server parameter is required')
+
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required')
+
+ try:
+ (changed) = delete_nic(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set nic state: %s' % str(e))
+
+ elif state == 'present':
+ if not module.params.get('lan'):
+ module.fail_json(msg='lan parameter is required')
+
+ try:
+ (nic_dict) = create_nic(module, profitbricks)
+ module.exit_json(nics=nic_dict)
+ except Exception as e:
+ module.fail_json(msg='failed to set nic state: %s' % str(e))
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/profitbricks/profitbricks_volume.py b/lib/ansible/modules/cloud/profitbricks/profitbricks_volume.py
new file mode 100644
index 0000000000..caed8579aa
--- /dev/null
+++ b/lib/ansible/modules/cloud/profitbricks/profitbricks_volume.py
@@ -0,0 +1,434 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: profitbricks_volume
+short_description: Create or destroy a volume.
+description:
+ - Allows you to create or remove a volume from a ProfitBricks datacenter. This module has a dependency on profitbricks >= 1.0.0
+version_added: "2.0"
+options:
+ datacenter:
+ description:
+ - The datacenter in which to create the volumes.
+ required: true
+ name:
+ description:
+ - The name of the volumes. You can enumerate the names using auto_increment.
+ required: true
+ size:
+ description:
+ - The size of the volume.
+ required: false
+ default: 10
+ bus:
+ description:
+ - The bus type.
+ required: false
+ default: VIRTIO
+ choices: [ "IDE", "VIRTIO"]
+ image:
+ description:
+ - The system image ID for the volume, e.g. a3eae284-a2fe-11e4-b187-5f1f641608c8. This can also be a snapshot image ID.
+ required: true
+ image_password:
+ description:
+ - Password set for the administrative user.
+ required: false
+ version_added: '2.2'
+ ssh_keys:
+ description:
+ - Public SSH keys allowing access to the virtual machine.
+ required: false
+ version_added: '2.2'
+ disk_type:
+ description:
+ - The disk type of the volume.
+ required: false
+ default: HDD
+ choices: [ "HDD", "SSD" ]
+ licence_type:
+ description:
+ - The licence type for the volume. This is used when the image is non-standard.
+ required: false
+ default: UNKNOWN
+ choices: ["LINUX", "WINDOWS", "UNKNOWN" , "OTHER"]
+ count:
+ description:
+ - The number of volumes you wish to create.
+ required: false
+ default: 1
+ auto_increment:
+ description:
+ - Whether or not to increment a single number in the name for created virtual machines.
+ default: yes
+ choices: ["yes", "no"]
+ instance_ids:
+ description:
+ - list of instance ids, currently only used when state='absent' to remove instances.
+ required: false
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environement variable.
+ required: false
+ subscription_password:
+ description:
+ - THe ProfitBricks password. Overrides the PB_PASSWORD environement variable.
+ required: false
+ wait:
+ description:
+ - wait for the datacenter to be created before returning
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 600
+ state:
+ description:
+ - create or terminate datacenters
+ required: false
+ default: 'present'
+ choices: ["present", "absent"]
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (baldwin@stackpointcloud.com)
+'''
+
+EXAMPLES = '''
+
+# Create Multiple Volumes
+
+- profitbricks_volume:
+ datacenter: Tardis One
+ name: vol%02d
+ count: 5
+ auto_increment: yes
+ wait_timeout: 500
+ state: present
+
+# Remove Volumes
+
+- profitbricks_volume:
+ datacenter: Tardis One
+ instance_ids:
+ - 'vol01'
+ - 'vol02'
+ wait_timeout: 500
+ state: absent
+
+'''
+
+import re
+import time
+
+HAS_PB_SDK = True
+
+try:
+ from profitbricks.client import ProfitBricksService, Volume
+except ImportError:
+ HAS_PB_SDK = False
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+
+
+uuid_match = re.compile(
+ '[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise: return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request failed to complete ' + msg + ' "' + str(
+ promise['requestId']) + '" to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+
+def _create_volume(module, profitbricks, datacenter, name):
+ size = module.params.get('size')
+ bus = module.params.get('bus')
+ image = module.params.get('image')
+ image_password = module.params.get('image_password')
+ ssh_keys = module.params.get('ssh_keys')
+ disk_type = module.params.get('disk_type')
+ licence_type = module.params.get('licence_type')
+ wait_timeout = module.params.get('wait_timeout')
+ wait = module.params.get('wait')
+
+ try:
+ v = Volume(
+ name=name,
+ size=size,
+ bus=bus,
+ image=image,
+ image_password=image_password,
+ ssh_keys=ssh_keys,
+ disk_type=disk_type,
+ licence_type=licence_type
+ )
+
+ volume_response = profitbricks.create_volume(datacenter, v)
+
+ if wait:
+ _wait_for_completion(profitbricks, volume_response,
+ wait_timeout, "_create_volume")
+
+ except Exception as e:
+ module.fail_json(msg="failed to create the volume: %s" % str(e))
+
+ return volume_response
+
+
+def _delete_volume(module, profitbricks, datacenter, volume):
+ try:
+ profitbricks.delete_volume(datacenter, volume)
+ except Exception as e:
+ module.fail_json(msg="failed to remove the volume: %s" % str(e))
+
+
+def create_volume(module, profitbricks):
+ """
+ Creates a volume.
+
+ This will create a volume in a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was created, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ name = module.params.get('name')
+ auto_increment = module.params.get('auto_increment')
+ count = module.params.get('count')
+
+ datacenter_found = False
+ failed = True
+ volumes = []
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ datacenter_found = True
+ break
+
+ if not datacenter_found:
+ module.fail_json(msg='datacenter could not be found.')
+
+ if auto_increment:
+ numbers = set()
+ count_offset = 1
+
+ try:
+ name % 0
+ except TypeError:
+ e = get_exception()
+ if e.message.startswith('not all'):
+ name = '%s%%d' % name
+ else:
+ module.fail_json(msg=e.message)
+
+ number_range = xrange(count_offset, count_offset + count + len(numbers))
+ available_numbers = list(set(number_range).difference(numbers))
+ names = []
+ numbers_to_use = available_numbers[:count]
+ for number in numbers_to_use:
+ names.append(name % number)
+ else:
+ names = [name] * count
+
+ for name in names:
+ create_response = _create_volume(module, profitbricks, str(datacenter), name)
+ volumes.append(create_response)
+ _attach_volume(module, profitbricks, datacenter, create_response['id'])
+ failed = False
+
+ results = {
+ 'failed': failed,
+ 'volumes': volumes,
+ 'action': 'create',
+ 'instance_ids': {
+ 'instances': [i['id'] for i in volumes],
+ }
+ }
+
+ return results
+
+
+def delete_volume(module, profitbricks):
+ """
+ Removes a volume.
+
+ This will create a volume in a datacenter.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was removed, false otherwise
+ """
+ if not isinstance(module.params.get('instance_ids'), list) or len(module.params.get('instance_ids')) < 1:
+ module.fail_json(msg='instance_ids should be a list of virtual machine ids or names, aborting')
+
+ datacenter = module.params.get('datacenter')
+ changed = False
+ instance_ids = module.params.get('instance_ids')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ for n in instance_ids:
+ if(uuid_match.match(n)):
+ _delete_volume(module, profitbricks, datacenter, volume)
+ changed = True
+ else:
+ volumes = profitbricks.list_volumes(datacenter)
+ for v in volumes['items']:
+ if n == v['properties']['name']:
+ volume_id = v['id']
+ _delete_volume(module, profitbricks, datacenter, volume_id)
+ changed = True
+
+ return changed
+
+
+def _attach_volume(module, profitbricks, datacenter, volume):
+ """
+ Attaches a volume.
+
+ This will attach a volume to the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was attached, false otherwise
+ """
+ server = module.params.get('server')
+
+ # Locate UUID for Server
+ if server:
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server = s['id']
+ break
+
+ try:
+ return profitbricks.attach_volume(datacenter, server, volume)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg='failed to attach volume: %s' % str(e))
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ server=dict(),
+ name=dict(),
+ size=dict(type='int', default=10),
+ bus=dict(choices=['VIRTIO', 'IDE'], default='VIRTIO'),
+ image=dict(),
+ image_password=dict(default=None),
+ ssh_keys=dict(type='list', default=[]),
+ disk_type=dict(choices=['HDD', 'SSD'], default='HDD'),
+ licence_type=dict(default='UNKNOWN'),
+ count=dict(type='int', default=1),
+ auto_increment=dict(type='bool', default=True),
+ instance_ids=dict(type='list', default=[]),
+ subscription_user=dict(),
+ subscription_password=dict(),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ )
+ )
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for running or stopping machines.')
+
+ try:
+ (changed) = delete_volume(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg='failed to set volume state: %s' % str(e))
+
+ elif state == 'present':
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required for new instance')
+ if not module.params.get('name'):
+ module.fail_json(msg='name parameter is required for new instance')
+
+ try:
+ (volume_dict_array) = create_volume(module, profitbricks)
+ module.exit_json(**volume_dict_array)
+ except Exception:
+ e = get_exception()
+ module.fail_json(msg='failed to set volume state: %s' % str(e))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/profitbricks/profitbricks_volume_attachments.py b/lib/ansible/modules/cloud/profitbricks/profitbricks_volume_attachments.py
new file mode 100644
index 0000000000..1904c470a5
--- /dev/null
+++ b/lib/ansible/modules/cloud/profitbricks/profitbricks_volume_attachments.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: profitbricks_volume_attachments
+short_description: Attach or detach a volume.
+description:
+ - Allows you to attach or detach a volume from a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0
+version_added: "2.0"
+options:
+ datacenter:
+ description:
+ - The datacenter in which to operate.
+ required: true
+ server:
+ description:
+ - The name of the server you wish to detach or attach the volume.
+ required: true
+ volume:
+ description:
+ - The volume name or ID.
+ required: true
+ subscription_user:
+ description:
+ - The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environement variable.
+ required: false
+ subscription_password:
+ description:
+ - THe ProfitBricks password. Overrides the PB_PASSWORD environement variable.
+ required: false
+ wait:
+ description:
+ - wait for the operation to complete before returning
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 600
+ state:
+ description:
+ - Indicate desired state of the resource
+ required: false
+ default: 'present'
+ choices: ["present", "absent"]
+
+requirements: [ "profitbricks" ]
+author: Matt Baldwin (baldwin@stackpointcloud.com)
+'''
+
+EXAMPLES = '''
+
+# Attach a Volume
+
+- profitbricks_volume_attachments:
+ datacenter: Tardis One
+ server: node002
+ volume: vol01
+ wait_timeout: 500
+ state: present
+
+# Detach a Volume
+
+- profitbricks_volume_attachments:
+ datacenter: Tardis One
+ server: node002
+ volume: vol01
+ wait_timeout: 500
+ state: absent
+
+'''
+
+import re
+import uuid
+import time
+
+HAS_PB_SDK = True
+
+try:
+ from profitbricks.client import ProfitBricksService, Volume
+except ImportError:
+ HAS_PB_SDK = False
+
+uuid_match = re.compile(
+ '[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
+
+
+def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
+ if not promise: return
+ wait_timeout = time.time() + wait_timeout
+ while wait_timeout > time.time():
+ time.sleep(5)
+ operation_result = profitbricks.get_request(
+ request_id=promise['requestId'],
+ status=True)
+
+ if operation_result['metadata']['status'] == "DONE":
+ return
+ elif operation_result['metadata']['status'] == "FAILED":
+ raise Exception(
+ 'Request failed to complete ' + msg + ' "' + str(
+ promise['requestId']) + '" to complete.')
+
+ raise Exception(
+ 'Timed out waiting for async operation ' + msg + ' "' + str(
+ promise['requestId']
+ ) + '" to complete.')
+
+def attach_volume(module, profitbricks):
+ """
+ Attaches a volume.
+
+ This will attach a volume to the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was attached, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ volume = module.params.get('volume')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server= s['id']
+ break
+
+ # Locate UUID for Volume
+ if not (uuid_match.match(volume)):
+ volume_list = profitbricks.list_volumes(datacenter)
+ for v in volume_list['items']:
+ if volume == v['properties']['name']:
+ volume = v['id']
+ break
+
+ return profitbricks.attach_volume(datacenter, server, volume)
+
+def detach_volume(module, profitbricks):
+ """
+ Detaches a volume.
+
+ This will remove a volume from the server.
+
+ module : AnsibleModule object
+ profitbricks: authenticated profitbricks object.
+
+ Returns:
+ True if the volume was detached, false otherwise
+ """
+ datacenter = module.params.get('datacenter')
+ server = module.params.get('server')
+ volume = module.params.get('volume')
+
+ # Locate UUID for Datacenter
+ if not (uuid_match.match(datacenter)):
+ datacenter_list = profitbricks.list_datacenters()
+ for d in datacenter_list['items']:
+ dc = profitbricks.get_datacenter(d['id'])
+ if datacenter == dc['properties']['name']:
+ datacenter = d['id']
+ break
+
+ # Locate UUID for Server
+ if not (uuid_match.match(server)):
+ server_list = profitbricks.list_servers(datacenter)
+ for s in server_list['items']:
+ if server == s['properties']['name']:
+ server= s['id']
+ break
+
+ # Locate UUID for Volume
+ if not (uuid_match.match(volume)):
+ volume_list = profitbricks.list_volumes(datacenter)
+ for v in volume_list['items']:
+ if volume == v['properties']['name']:
+ volume = v['id']
+ break
+
+ return profitbricks.detach_volume(datacenter, server, volume)
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ datacenter=dict(),
+ server=dict(),
+ volume=dict(),
+ subscription_user=dict(),
+ subscription_password=dict(),
+ wait=dict(type='bool', default=True),
+ wait_timeout=dict(type='int', default=600),
+ state=dict(default='present'),
+ )
+ )
+
+ if not HAS_PB_SDK:
+ module.fail_json(msg='profitbricks required for this module')
+
+ if not module.params.get('subscription_user'):
+ module.fail_json(msg='subscription_user parameter is required')
+ if not module.params.get('subscription_password'):
+ module.fail_json(msg='subscription_password parameter is required')
+ if not module.params.get('datacenter'):
+ module.fail_json(msg='datacenter parameter is required')
+ if not module.params.get('server'):
+ module.fail_json(msg='server parameter is required')
+ if not module.params.get('volume'):
+ module.fail_json(msg='volume parameter is required')
+
+ subscription_user = module.params.get('subscription_user')
+ subscription_password = module.params.get('subscription_password')
+
+ profitbricks = ProfitBricksService(
+ username=subscription_user,
+ password=subscription_password)
+
+ state = module.params.get('state')
+
+ if state == 'absent':
+ try:
+ (changed) = detach_volume(module, profitbricks)
+ module.exit_json(changed=changed)
+ except Exception as e:
+ module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
+ elif state == 'present':
+ try:
+ attach_volume(module, profitbricks)
+ module.exit_json()
+ except Exception as e:
+ module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/rackspace/rax_clb_ssl.py b/lib/ansible/modules/cloud/rackspace/rax_clb_ssl.py
new file mode 100644
index 0000000000..37c35b32de
--- /dev/null
+++ b/lib/ansible/modules/cloud/rackspace/rax_clb_ssl.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION='''
+module: rax_clb_ssl
+short_description: Manage SSL termination for a Rackspace Cloud Load Balancer.
+description:
+- Set up, reconfigure, or remove SSL termination for an existing load balancer.
+version_added: "2.0"
+options:
+ loadbalancer:
+ description:
+ - Name or ID of the load balancer on which to manage SSL termination.
+ required: true
+ state:
+ description:
+ - If set to "present", SSL termination will be added to this load balancer.
+ - If "absent", SSL termination will be removed instead.
+ choices:
+ - present
+ - absent
+ default: present
+ enabled:
+ description:
+ - If set to "false", temporarily disable SSL termination without discarding
+ - existing credentials.
+ default: true
+ private_key:
+ description:
+ - The private SSL key as a string in PEM format.
+ certificate:
+ description:
+ - The public SSL certificates as a string in PEM format.
+ intermediate_certificate:
+ description:
+ - One or more intermediate certificate authorities as a string in PEM
+ - format, concatenated into a single string.
+ secure_port:
+ description:
+ - The port to listen for secure traffic.
+ default: 443
+ secure_traffic_only:
+ description:
+ - If "true", the load balancer will *only* accept secure traffic.
+ default: false
+ https_redirect:
+ description:
+ - If "true", the load balancer will redirect HTTP traffic to HTTPS.
+ - Requires "secure_traffic_only" to be true. Incurs an implicit wait if SSL
+ - termination is also applied or removed.
+ wait:
+ description:
+ - Wait for the balancer to be in state "running" before turning.
+ default: false
+ wait_timeout:
+ description:
+ - How long before "wait" gives up, in seconds.
+ default: 300
+author: Ash Wilson
+extends_documentation_fragment: rackspace
+'''
+
+EXAMPLES = '''
+- name: Enable SSL termination on a load balancer
+ rax_clb_ssl:
+ loadbalancer: the_loadbalancer
+ state: present
+ private_key: "{{ lookup('file', 'credentials/server.key' ) }}"
+ certificate: "{{ lookup('file', 'credentials/server.crt' ) }}"
+ intermediate_certificate: "{{ lookup('file', 'credentials/trust-chain.crt') }}"
+ secure_traffic_only: true
+ wait: true
+
+- name: Disable SSL termination
+ rax_clb_ssl:
+ loadbalancer: "{{ registered_lb.balancer.id }}"
+ state: absent
+ wait: true
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
def cloud_load_balancer_ssl(module, loadbalancer, state, enabled, private_key,
                            certificate, intermediate_certificate, secure_port,
                            secure_traffic_only, https_redirect,
                            wait, wait_timeout):
    """Apply, reconfigure, or remove SSL termination on a cloud load balancer.

    Exits the module via exit_json/fail_json; never returns normally.
    """
    # Validate arguments.
    if state == 'present':
        if not private_key:
            module.fail_json(msg="private_key must be provided.")
        else:
            private_key = private_key.strip()

        if not certificate:
            module.fail_json(msg="certificate must be provided.")
        else:
            certificate = certificate.strip()

    # Integer division: `attempts` must be an int; `/` would produce a float
    # on Python 3.
    attempts = wait_timeout // 5

    # Locate the load balancer.
    balancer = rax_find_loadbalancer(module, pyrax, loadbalancer)
    existing_ssl = balancer.get_ssl_termination()

    changed = False

    if state == 'present':
        # Apply or reconfigure SSL termination on the load balancer.
        ssl_attrs = dict(
            securePort=secure_port,
            privatekey=private_key,
            certificate=certificate,
            intermediateCertificate=intermediate_certificate,
            enabled=enabled,
            secureTrafficOnly=secure_traffic_only
        )

        needs_change = False

        if existing_ssl:
            # items() instead of the Python-2-only iteritems() so this runs
            # on Python 3 as well.
            for ssl_attr, value in ssl_attrs.items():
                if ssl_attr == 'privatekey':
                    # The private key is not included in get_ssl_termination's
                    # output (as it shouldn't be). Also, if you're changing the
                    # private key, you'll also be changing the certificate,
                    # so we don't lose anything by not checking it.
                    continue

                if value is not None and existing_ssl.get(ssl_attr) != value:
                    needs_change = True
        else:
            needs_change = True

        if needs_change:
            try:
                balancer.add_ssl_termination(**ssl_attrs)
            except pyrax.exceptions.PyraxException as e:
                module.fail_json(msg='%s' % e.message)
            changed = True
    elif state == 'absent':
        # Remove SSL termination if it's already configured.
        if existing_ssl:
            try:
                balancer.delete_ssl_termination()
            except pyrax.exceptions.PyraxException as e:
                module.fail_json(msg='%s' % e.message)
            changed = True

    if https_redirect is not None and balancer.httpsRedirect != https_redirect:
        if changed:
            # This wait is unavoidable because load balancers are immutable
            # while the SSL termination changes above are being applied.
            pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)

        try:
            balancer.update(httpsRedirect=https_redirect)
        except pyrax.exceptions.PyraxException as e:
            module.fail_json(msg='%s' % e.message)
        changed = True

    if changed and wait:
        pyrax.utils.wait_for_build(balancer, interval=5, attempts=attempts)

    balancer.get()
    new_ssl_termination = balancer.get_ssl_termination()

    # Intentionally omit the private key from the module output, so you don't
    # accidentally echo it with `ansible-playbook -v` or `debug`, and the
    # certificate, which is just long. Convert other attributes to snake_case
    # and include https_redirect at the top-level.
    if new_ssl_termination:
        new_ssl = dict(
            enabled=new_ssl_termination['enabled'],
            secure_port=new_ssl_termination['securePort'],
            secure_traffic_only=new_ssl_termination['secureTrafficOnly']
        )
    else:
        new_ssl = None

    result = dict(
        changed=changed,
        https_redirect=balancer.httpsRedirect,
        ssl_termination=new_ssl,
        balancer=rax_to_dict(balancer, 'clb')
    )
    success = True

    if balancer.status == 'ERROR':
        result['msg'] = '%s failed to build' % balancer.id
        success = False
    elif wait and balancer.status not in ('ACTIVE', 'ERROR'):
        result['msg'] = 'Timeout waiting on %s' % balancer.id
        success = False

    if success:
        module.exit_json(**result)
    else:
        module.fail_json(**result)
+
def main():
    """Module entry point: build the argument spec and hand the parsed
    parameters to cloud_load_balancer_ssl()."""
    argument_spec = rax_argument_spec()
    argument_spec.update(dict(
        loadbalancer=dict(required=True),
        state=dict(default='present', choices=['present', 'absent']),
        enabled=dict(type='bool', default=True),
        private_key=dict(),
        certificate=dict(),
        intermediate_certificate=dict(),
        secure_port=dict(type='int', default=443),
        secure_traffic_only=dict(type='bool', default=False),
        https_redirect=dict(type='bool'),
        wait=dict(type='bool', default=False),
        wait_timeout=dict(type='int', default=300)
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together(),
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module.')

    p = module.params

    setup_rax_module(module, pyrax)

    # Boolean-ish parameters go through module.boolean(), matching the
    # coercion the rest of the rax modules apply.
    cloud_load_balancer_ssl(
        module,
        p.get('loadbalancer'),
        p.get('state'),
        module.boolean(p.get('enabled')),
        p.get('private_key'),
        p.get('certificate'),
        p.get('intermediate_certificate'),
        p.get('secure_port'),
        module.boolean(p.get('secure_traffic_only')),
        module.boolean(p.get('https_redirect')),
        module.boolean(p.get('wait')),
        p.get('wait_timeout'),
    )
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/rackspace/rax_mon_alarm.py b/lib/ansible/modules/cloud/rackspace/rax_mon_alarm.py
new file mode 100644
index 0000000000..0df4fad340
--- /dev/null
+++ b/lib/ansible/modules/cloud/rackspace/rax_mon_alarm.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: rax_mon_alarm
+short_description: Create or delete a Rackspace Cloud Monitoring alarm.
+description:
+- Create or delete a Rackspace Cloud Monitoring alarm that associates an
+ existing rax_mon_entity, rax_mon_check, and rax_mon_notification_plan with
+ criteria that specify what conditions will trigger which levels of
+ notifications. Rackspace monitoring module flow | rax_mon_entity ->
+ rax_mon_check -> rax_mon_notification -> rax_mon_notification_plan ->
+ *rax_mon_alarm*
+version_added: "2.0"
+options:
+ state:
+ description:
+ - Ensure that the alarm with this C(label) exists or does not exist.
+ choices: [ "present", "absent" ]
+ required: false
+ default: present
+ label:
+ description:
+ - Friendly name for this alarm, used to achieve idempotence. Must be a String
+ between 1 and 255 characters long.
+ required: true
+ entity_id:
+ description:
+ - ID of the entity this alarm is attached to. May be acquired by registering
+ the value of a rax_mon_entity task.
+ required: true
+ check_id:
+ description:
+ - ID of the check that should be alerted on. May be acquired by registering
+ the value of a rax_mon_check task.
+ required: true
+ notification_plan_id:
+ description:
+ - ID of the notification plan to trigger if this alarm fires. May be acquired
+ by registering the value of a rax_mon_notification_plan task.
+ required: true
+ criteria:
+ description:
+ - Alarm DSL that describes alerting conditions and their output states. Must
+ be between 1 and 16384 characters long. See
+ http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/alerts-language.html
+ for a reference on the alerting language.
+ disabled:
+ description:
+ - If yes, create this alarm, but leave it in an inactive state. Defaults to
+ no.
+ choices: [ "yes", "no" ]
+ metadata:
+ description:
+ - Arbitrary key/value pairs to accompany the alarm. Must be a hash of String
+ keys and values between 1 and 255 characters long.
+author: Ash Wilson
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Alarm example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Ensure that a specific alarm exists.
+ rax_mon_alarm:
+ credentials: ~/.rax_pub
+ state: present
+ label: uhoh
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ check_id: "{{ the_check['check']['id'] }}"
+ notification_plan_id: "{{ defcon1['notification_plan']['id'] }}"
+ criteria: >
+ if (rate(metric['average']) > 10) {
+ return new AlarmStatus(WARNING);
+ }
+ return new AlarmStatus(OK);
+ register: the_alarm
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
def alarm(module, state, label, entity_id, check_id, notification_plan_id, criteria,
          disabled, metadata):
    """Ensure a Cloud Monitoring alarm labeled `label` exists or is absent.

    Exits the module via exit_json/fail_json; never returns normally.
    """

    if len(label) < 1 or len(label) > 255:
        module.fail_json(msg='label must be between 1 and 255 characters long')

    # Parenthesize the range test: `and` binds tighter than `or`, so the
    # original expression evaluated len(criteria) even when criteria was
    # None, raising TypeError instead of skipping the validation.
    if criteria and (len(criteria) < 1 or len(criteria) > 16384):
        module.fail_json(msg='criteria must be between 1 and 16384 characters long')

    # Coerce attributes.

    changed = False
    alarm = None

    cm = pyrax.cloud_monitoring
    if not cm:
        module.fail_json(msg='Failed to instantiate client. This typically '
                             'indicates an invalid region or an incorrectly '
                             'capitalized region name.')

    existing = [a for a in cm.list_alarms(entity_id) if a.label == label]

    if existing:
        alarm = existing[0]

    if state == 'present':
        should_create = False
        should_update = False
        should_delete = False

        if len(existing) > 1:
            module.fail_json(msg='%s existing alarms have the label %s.' %
                             (len(existing), label))

        if alarm:
            # The check and notification plan can't be changed in place; a
            # difference there forces a delete + recreate.
            if check_id != alarm.check_id or notification_plan_id != alarm.notification_plan_id:
                should_delete = should_create = True

            should_update = (disabled and disabled != alarm.disabled) or \
                (metadata and metadata != alarm.metadata) or \
                (criteria and criteria != alarm.criteria)

            if should_update and not should_delete:
                cm.update_alarm(entity=entity_id, alarm=alarm,
                                criteria=criteria, disabled=disabled,
                                label=label, metadata=metadata)
                changed = True

            if should_delete:
                alarm.delete()
                changed = True
        else:
            should_create = True

        if should_create:
            alarm = cm.create_alarm(entity=entity_id, check=check_id,
                                    notification_plan=notification_plan_id,
                                    criteria=criteria, disabled=disabled, label=label,
                                    metadata=metadata)
            changed = True
    else:
        # state == 'absent': remove every alarm that carries this label.
        for a in existing:
            a.delete()
            changed = True

    if alarm:
        alarm_dict = {
            "id": alarm.id,
            "label": alarm.label,
            "check_id": alarm.check_id,
            "notification_plan_id": alarm.notification_plan_id,
            "criteria": alarm.criteria,
            "disabled": alarm.disabled,
            "metadata": alarm.metadata
        }
        module.exit_json(changed=changed, alarm=alarm_dict)
    else:
        module.exit_json(changed=changed)
+
def main():
    """Module entry point: parse arguments and delegate to alarm()."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            state=dict(default='present', choices=['present', 'absent']),
            label=dict(required=True),
            entity_id=dict(required=True),
            check_id=dict(required=True),
            notification_plan_id=dict(required=True),
            criteria=dict(),
            disabled=dict(type='bool', default=False),
            metadata=dict(type='dict')
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together()
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    p = module.params

    setup_rax_module(module, pyrax)

    alarm(module,
          p.get('state'),
          p.get('label'),
          p.get('entity_id'),
          p.get('check_id'),
          p.get('notification_plan_id'),
          p.get('criteria'),
          module.boolean(p.get('disabled')),
          p.get('metadata'))
+
+
+# Import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+# Invoke the module.
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/rackspace/rax_mon_check.py b/lib/ansible/modules/cloud/rackspace/rax_mon_check.py
new file mode 100644
index 0000000000..c8bcfcd569
--- /dev/null
+++ b/lib/ansible/modules/cloud/rackspace/rax_mon_check.py
@@ -0,0 +1,318 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: rax_mon_check
+short_description: Create or delete a Rackspace Cloud Monitoring check for an
+ existing entity.
+description:
+- Create or delete a Rackspace Cloud Monitoring check associated with an
+ existing rax_mon_entity. A check is a specific test or measurement that is
+ performed, possibly from different monitoring zones, on the systems you
+ monitor. Rackspace monitoring module flow | rax_mon_entity ->
+ *rax_mon_check* -> rax_mon_notification -> rax_mon_notification_plan ->
+ rax_mon_alarm
+version_added: "2.0"
+options:
+ state:
+ description:
+ - Ensure that a check with this C(label) exists or does not exist.
+ choices: ["present", "absent"]
+ entity_id:
+ description:
+ - ID of the rax_mon_entity to target with this check.
+ required: true
+ label:
+ description:
+ - Defines a label for this check, between 1 and 64 characters long.
+ required: true
+ check_type:
+ description:
+ - The type of check to create. C(remote.) checks may be created on any
+ rax_mon_entity. C(agent.) checks may only be created on rax_mon_entities
+ that have a non-null C(agent_id).
+ choices:
+ - remote.dns
+ - remote.ftp-banner
+ - remote.http
+ - remote.imap-banner
+ - remote.mssql-banner
+ - remote.mysql-banner
+ - remote.ping
+ - remote.pop3-banner
+ - remote.postgresql-banner
+ - remote.smtp-banner
+ - remote.smtp
+ - remote.ssh
+ - remote.tcp
+ - remote.telnet-banner
+ - agent.filesystem
+ - agent.memory
+ - agent.load_average
+ - agent.cpu
+ - agent.disk
+ - agent.network
+ - agent.plugin
+ required: true
+ monitoring_zones_poll:
+ description:
+ - Comma-separated list of the names of the monitoring zones the check should
+ run from. Available monitoring zones include mzdfw, mzhkg, mziad, mzlon,
+ mzord and mzsyd. Required for remote.* checks; prohibited for agent.* checks.
+ target_hostname:
+ description:
+ - One of `target_hostname` and `target_alias` is required for remote.* checks,
+ but prohibited for agent.* checks. The hostname this check should target.
+ Must be a valid IPv4, IPv6, or FQDN.
+ target_alias:
+ description:
+ - One of `target_alias` and `target_hostname` is required for remote.* checks,
+ but prohibited for agent.* checks. Use the corresponding key in the entity's
+ `ip_addresses` hash to resolve an IP address to target.
+ details:
+ description:
+ - Additional details specific to the check type. Must be a hash of strings
+ between 1 and 255 characters long, or an array or object containing 0 to
+ 256 items.
+ disabled:
+ description:
+ - If "yes", ensure the check is created, but don't actually use it yet.
+ choices: [ "yes", "no" ]
+ metadata:
+ description:
+ - Hash of arbitrary key-value pairs to accompany this check if it fires.
+ Keys and values must be strings between 1 and 255 characters long.
+ period:
+ description:
+ - The number of seconds between each time the check is performed. Must be
+ greater than the minimum period set on your account.
+ timeout:
+ description:
+ - The number of seconds this check will wait when attempting to collect
+ results. Must be less than the period.
+author: Ash Wilson
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Create a monitoring check
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Associate a check with an existing entity.
+ rax_mon_check:
+ credentials: ~/.rax_pub
+ state: present
+ entity_id: "{{ the_entity['entity']['id'] }}"
+ label: the_check
+ check_type: remote.ping
+ monitoring_zones_poll: mziad,mzord,mzdfw
+ details:
+ count: 10
+ meta:
+ hurf: durf
+ register: the_check
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
def cloud_check(module, state, entity_id, label, check_type,
                monitoring_zones_poll, target_hostname, target_alias, details,
                disabled, metadata, period, timeout):
    """Ensure a Cloud Monitoring check labeled `label` exists (or not) on the
    entity `entity_id`.

    Exits the module via exit_json/fail_json; never returns normally.
    """

    # Coerce attributes.
    if monitoring_zones_poll and not isinstance(monitoring_zones_poll, list):
        monitoring_zones_poll = [monitoring_zones_poll]

    if period:
        period = int(period)

    if timeout:
        timeout = int(timeout)

    changed = False
    check = None

    cm = pyrax.cloud_monitoring
    if not cm:
        module.fail_json(msg='Failed to instantiate client. This typically '
                             'indicates an invalid region or an incorrectly '
                             'capitalized region name.')

    entity = cm.get_entity(entity_id)
    if not entity:
        module.fail_json(msg='Failed to instantiate entity. "%s" may not be'
                             ' a valid entity id.' % entity_id)

    existing = [e for e in entity.list_checks() if e.label == label]

    if existing:
        check = existing[0]

    if state == 'present':
        if len(existing) > 1:
            module.fail_json(msg='%s existing checks have a label of %s.' %
                             (len(existing), label))

        should_delete = False
        should_create = False
        should_update = False

        if check:
            # Details may include keys set to default values that are not
            # included in the initial creation.
            #
            # Only force a recreation of the check if one of the *specified*
            # keys is missing or has a different value.
            if details:
                # items() instead of the Python-2-only iteritems() so this
                # runs on Python 3 as well.
                for (key, value) in details.items():
                    if key not in check.details:
                        should_delete = should_create = True
                    elif value != check.details[key]:
                        should_delete = should_create = True

            should_update = label != check.label or \
                (target_hostname and target_hostname != check.target_hostname) or \
                (target_alias and target_alias != check.target_alias) or \
                (disabled != check.disabled) or \
                (metadata and metadata != check.metadata) or \
                (period and period != check.period) or \
                (timeout and timeout != check.timeout) or \
                (monitoring_zones_poll and monitoring_zones_poll != check.monitoring_zones_poll)

            if should_update and not should_delete:
                check.update(label=label,
                             disabled=disabled,
                             metadata=metadata,
                             monitoring_zones_poll=monitoring_zones_poll,
                             timeout=timeout,
                             period=period,
                             target_alias=target_alias,
                             target_hostname=target_hostname)
                changed = True
        else:
            # The check doesn't exist yet.
            should_create = True

        if should_delete:
            check.delete()

        if should_create:
            check = cm.create_check(entity,
                                    label=label,
                                    check_type=check_type,
                                    target_hostname=target_hostname,
                                    target_alias=target_alias,
                                    monitoring_zones_poll=monitoring_zones_poll,
                                    details=details,
                                    disabled=disabled,
                                    metadata=metadata,
                                    period=period,
                                    timeout=timeout)
            changed = True
    elif state == 'absent':
        if check:
            check.delete()
            changed = True
    else:
        module.fail_json(msg='state must be either present or absent.')

    if check:
        check_dict = {
            "id": check.id,
            "label": check.label,
            "type": check.type,
            "target_hostname": check.target_hostname,
            "target_alias": check.target_alias,
            "monitoring_zones_poll": check.monitoring_zones_poll,
            "details": check.details,
            "disabled": check.disabled,
            "metadata": check.metadata,
            "period": check.period,
            "timeout": check.timeout
        }
        module.exit_json(changed=changed, check=check_dict)
    else:
        module.exit_json(changed=changed)
+
def main():
    """Module entry point: parse arguments and delegate to cloud_check()."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            entity_id=dict(required=True),
            label=dict(required=True),
            check_type=dict(required=True),
            monitoring_zones_poll=dict(),
            target_hostname=dict(),
            target_alias=dict(),
            details=dict(type='dict', default={}),
            disabled=dict(type='bool', default=False),
            metadata=dict(type='dict', default={}),
            period=dict(type='int'),
            timeout=dict(type='int'),
            state=dict(default='present', choices=['present', 'absent'])
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together()
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    p = module.params

    setup_rax_module(module, pyrax)

    cloud_check(module,
                p.get('state'),
                p.get('entity_id'),
                p.get('label'),
                p.get('check_type'),
                p.get('monitoring_zones_poll'),
                p.get('target_hostname'),
                p.get('target_alias'),
                p.get('details'),
                module.boolean(p.get('disabled')),
                p.get('metadata'),
                p.get('period'),
                p.get('timeout'))
+
+
+# Import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+# Invoke the module.
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/rackspace/rax_mon_entity.py b/lib/ansible/modules/cloud/rackspace/rax_mon_entity.py
new file mode 100644
index 0000000000..fae5830965
--- /dev/null
+++ b/lib/ansible/modules/cloud/rackspace/rax_mon_entity.py
@@ -0,0 +1,197 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: rax_mon_entity
+short_description: Create or delete a Rackspace Cloud Monitoring entity
+description:
+- Create or delete a Rackspace Cloud Monitoring entity, which represents a device
+ to monitor. Entities associate checks and alarms with a target system and
+ provide a convenient, centralized place to store IP addresses. Rackspace
+ monitoring module flow | *rax_mon_entity* -> rax_mon_check ->
+ rax_mon_notification -> rax_mon_notification_plan -> rax_mon_alarm
+version_added: "2.0"
+options:
+ label:
+ description:
+ - Defines a name for this entity. Must be a non-empty string between 1 and
+ 255 characters long.
+ required: true
+ state:
+ description:
+ - Ensure that an entity with this C(name) exists or does not exist.
+ choices: ["present", "absent"]
+ agent_id:
+ description:
+ - Rackspace monitoring agent on the target device to which this entity is
+ bound. Necessary to collect C(agent.) rax_mon_checks against this entity.
+ named_ip_addresses:
+ description:
+ - Hash of IP addresses that may be referenced by name by rax_mon_checks
+ added to this entity. Must be a dictionary of with keys that are names
+ between 1 and 64 characters long, and values that are valid IPv4 or IPv6
+ addresses.
+ metadata:
+ description:
+ - Hash of arbitrary C(name), C(value) pairs that are passed to associated
+ rax_mon_alarms. Names and values must all be between 1 and 255 characters
+ long.
+author: Ash Wilson
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Entity example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Ensure an entity exists
+ rax_mon_entity:
+ credentials: ~/.rax_pub
+ state: present
+ label: my_entity
+ named_ip_addresses:
+ web_box: 192.0.2.4
+ db_box: 192.0.2.5
+ meta:
+ hurf: durf
+ register: the_entity
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
def cloud_monitoring(module, state, label, agent_id, named_ip_addresses,
                     metadata):
    """Ensure a Cloud Monitoring entity labeled `label` exists or is absent.

    Exits the module via exit_json/fail_json; never returns normally.
    """

    if not 1 <= len(label) <= 255:
        module.fail_json(msg='label must be between 1 and 255 characters long')

    changed = False

    cm = pyrax.cloud_monitoring
    if not cm:
        module.fail_json(msg='Failed to instantiate client. This typically '
                             'indicates an invalid region or an incorrectly '
                             'capitalized region name.')

    # Collect every entity that already carries this label.
    existing = [e for e in cm.list_entities() if e.label == label]
    entity = existing[0] if existing else None

    if state == 'present':
        create_new = False
        replace_existing = False

        if len(existing) > 1:
            module.fail_json(msg='%s existing entities have the label %s.' %
                             (len(existing), label))

        if entity is None:
            create_new = True
        else:
            # A different IP-address map can only be applied by recreating
            # the entity.
            if named_ip_addresses and named_ip_addresses != entity.ip_addresses:
                replace_existing = create_new = True

            # Change an existing Entity in place, unless there's nothing to do.
            update_in_place = agent_id and agent_id != entity.agent_id or \
                (metadata and metadata != entity.metadata)

            if update_in_place and not replace_existing:
                entity.update(agent_id, metadata)
                changed = True

            if replace_existing:
                entity.delete()

        if create_new:
            # Create a new Entity.
            entity = cm.create_entity(label=label, agent=agent_id,
                                      ip_addresses=named_ip_addresses,
                                      metadata=metadata)
            changed = True
    else:
        # Delete the existing Entities.
        for doomed in existing:
            doomed.delete()
            changed = True

    if entity:
        module.exit_json(changed=changed, entity={
            "id": entity.id,
            "name": entity.name,
            "agent_id": entity.agent_id,
        })
    else:
        module.exit_json(changed=changed)
+
def main():
    """Module entry point: parse arguments and delegate to cloud_monitoring()."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            state=dict(default='present', choices=['present', 'absent']),
            label=dict(required=True),
            agent_id=dict(),
            named_ip_addresses=dict(type='dict', default={}),
            metadata=dict(type='dict', default={})
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together()
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    p = module.params

    setup_rax_module(module, pyrax)

    cloud_monitoring(module,
                     p.get('state'),
                     p.get('label'),
                     p.get('agent_id'),
                     p.get('named_ip_addresses'),
                     p.get('metadata'))
+
+# Import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+# Invoke the module.
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/rackspace/rax_mon_notification.py b/lib/ansible/modules/cloud/rackspace/rax_mon_notification.py
new file mode 100644
index 0000000000..21396e7cb0
--- /dev/null
+++ b/lib/ansible/modules/cloud/rackspace/rax_mon_notification.py
@@ -0,0 +1,181 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: rax_mon_notification
+short_description: Create or delete a Rackspace Cloud Monitoring notification.
+description:
+- Create or delete a Rackspace Cloud Monitoring notification that specifies a
+ channel that can be used to communicate alarms, such as email, webhooks, or
+ PagerDuty. Rackspace monitoring module flow | rax_mon_entity -> rax_mon_check ->
+ *rax_mon_notification* -> rax_mon_notification_plan -> rax_mon_alarm
+version_added: "2.0"
+options:
+ state:
+ description:
+ - Ensure that the notification with this C(label) exists or does not exist.
+ choices: ['present', 'absent']
+ label:
+ description:
+ - Defines a friendly name for this notification. String between 1 and 255
+ characters long.
+ required: true
+ notification_type:
+ description:
+ - A supported notification type.
+ choices: ["webhook", "email", "pagerduty"]
+ required: true
+ details:
+ description:
+ - Dictionary of key-value pairs used to initialize the notification.
+ Required keys and meanings vary with notification type. See
+ http://docs.rackspace.com/cm/api/v1.0/cm-devguide/content/
+ service-notification-types-crud.html for details.
+ required: true
+author: Ash Wilson
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Monitoring notification example
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Email me when something goes wrong.
+ rax_mon_notification:
+ credentials: ~/.rax_pub
+ label: omg
+ notification_type: email
+ details:
+ address: me@mailhost.com
+ register: the_notification
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
def notification(module, state, label, notification_type, details):
    """Ensure a Cloud Monitoring notification labelled `label` exists or is absent.

    :param module: AnsibleModule instance, used for exit/fail reporting.
    :param state: 'present' to create/update, 'absent' to delete all matches.
    :param label: friendly name, 1-255 characters.
    :param notification_type: one of 'webhook', 'email', 'pagerduty'.
    :param details: type-specific settings dict.
    """
    if len(label) < 1 or len(label) > 255:
        module.fail_json(msg='label must be between 1 and 255 characters long')

    changed = False
    notification = None

    cm = pyrax.cloud_monitoring
    if not cm:
        module.fail_json(msg='Failed to instantiate client. This typically '
                             'indicates an invalid region or an incorrectly '
                             'capitalized region name.')

    existing = [n for n in cm.list_notifications() if n.label == label]

    if existing:
        notification = existing[0]

    if state == 'present':
        should_update = False
        should_delete = False
        should_create = False

        if len(existing) > 1:
            module.fail_json(msg='%s existing notifications are labelled %s.' %
                             (len(existing), label))

        if notification:
            # The type of a notification cannot be changed in place, so a
            # differing type forces delete + recreate.
            should_delete = (notification_type != notification.type)

            should_update = (details != notification.details)
            if should_update and not should_delete:
                # Bug fix: push the *requested* details. The original passed
                # notification.details back to update(), a silent no-op.
                notification.update(details=details)
                changed = True

            if should_delete:
                notification.delete()
                # Bug fix: recreate after a type change (mirrors the
                # rax_mon_notification_plan module); previously the
                # notification was deleted and never replaced.
                should_create = True
        else:
            should_create = True

        if should_create:
            notification = cm.create_notification(notification_type,
                                                  label=label, details=details)
            changed = True
    else:
        # state == 'absent': remove every notification carrying this label.
        for n in existing:
            n.delete()
            changed = True

    if notification:
        notification_dict = {
            "id": notification.id,
            "type": notification.type,
            "label": notification.label,
            "details": notification.details
        }
        module.exit_json(changed=changed, notification=notification_dict)
    else:
        module.exit_json(changed=changed)
+
def main():
    """Ansible entry point: validate arguments and delegate to notification()."""
    arg_spec = rax_argument_spec()
    arg_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        label=dict(required=True),
        notification_type=dict(required=True, choices=['webhook', 'email', 'pagerduty']),
        details=dict(required=True, type='dict'),
    ))

    module = AnsibleModule(argument_spec=arg_spec,
                           required_together=rax_required_together())

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    setup_rax_module(module, pyrax)

    params = module.params
    notification(module,
                 params.get('state'),
                 params.get('label'),
                 params.get('notification_type'),
                 params.get('details'))
+
+# Import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+# Invoke the module.
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/rackspace/rax_mon_notification_plan.py b/lib/ansible/modules/cloud/rackspace/rax_mon_notification_plan.py
new file mode 100644
index 0000000000..a0b283884f
--- /dev/null
+++ b/lib/ansible/modules/cloud/rackspace/rax_mon_notification_plan.py
@@ -0,0 +1,186 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: rax_mon_notification_plan
+short_description: Create or delete a Rackspace Cloud Monitoring notification
+ plan.
+description:
+- Create or delete a Rackspace Cloud Monitoring notification plan by
+ associating existing rax_mon_notifications with severity levels. Rackspace
+ monitoring module flow | rax_mon_entity -> rax_mon_check ->
+ rax_mon_notification -> *rax_mon_notification_plan* -> rax_mon_alarm
+version_added: "2.0"
+options:
+ state:
+ description:
+ - Ensure that the notification plan with this C(label) exists or does not
+ exist.
+ choices: ['present', 'absent']
+ label:
+ description:
+ - Defines a friendly name for this notification plan. String between 1 and
+ 255 characters long.
+ required: true
+ critical_state:
+ description:
+ - Notification list to use when the alarm state is CRITICAL. Must be an
+ array of valid rax_mon_notification ids.
+ warning_state:
+ description:
+ - Notification list to use when the alarm state is WARNING. Must be an array
+ of valid rax_mon_notification ids.
+ ok_state:
+ description:
+ - Notification list to use when the alarm state is OK. Must be an array of
+ valid rax_mon_notification ids.
+author: Ash Wilson
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Example notification plan
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Establish who gets called when.
+ rax_mon_notification_plan:
+ credentials: ~/.rax_pub
+ state: present
+ label: defcon1
+ critical_state:
+ - "{{ everyone['notification']['id'] }}"
+ warning_state:
+ - "{{ opsfloor['notification']['id'] }}"
+ register: defcon1
+'''
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
def notification_plan(module, state, label, critical_state, warning_state, ok_state):
    """Ensure a Cloud Monitoring notification plan labelled `label` exists or is absent.

    Plans are never updated in place: if any requested state list differs from
    the existing plan, the plan is deleted and recreated.
    """
    if len(label) < 1 or len(label) > 255:
        module.fail_json(msg='label must be between 1 and 255 characters long')

    changed = False
    plan = None

    cm = pyrax.cloud_monitoring
    if not cm:
        module.fail_json(msg='Failed to instantiate client. This typically '
                             'indicates an invalid region or an incorrectly '
                             'capitalized region name.')

    matching = [p for p in cm.list_notification_plans() if p.label == label]

    if matching:
        plan = matching[0]

    if state == 'present':
        if len(matching) > 1:
            module.fail_json(msg='%s notification plans are labelled %s.' %
                             (len(matching), label))

        create_needed = plan is None

        if plan:
            out_of_date = (critical_state and critical_state != plan.critical_state) or \
                          (warning_state and warning_state != plan.warning_state) or \
                          (ok_state and ok_state != plan.ok_state)
            if out_of_date:
                # No in-place update supported: replace wholesale.
                plan.delete()
                create_needed = True

        if create_needed:
            plan = cm.create_notification_plan(label=label,
                                               critical_state=critical_state,
                                               warning_state=warning_state,
                                               ok_state=ok_state)
            changed = True
    else:
        # state == 'absent': delete every plan carrying this label.
        for stale in matching:
            stale.delete()
            changed = True

    if plan:
        plan_dict = {
            "id": plan.id,
            "critical_state": plan.critical_state,
            "warning_state": plan.warning_state,
            "ok_state": plan.ok_state,
            "metadata": plan.metadata
        }
        module.exit_json(changed=changed, notification_plan=plan_dict)
    else:
        module.exit_json(changed=changed)
+
def main():
    """Ansible entry point: validate arguments and delegate to notification_plan()."""
    arg_spec = rax_argument_spec()
    arg_spec.update(dict(
        state=dict(default='present', choices=['present', 'absent']),
        label=dict(required=True),
        critical_state=dict(type='list'),
        warning_state=dict(type='list'),
        ok_state=dict(type='list'),
    ))

    module = AnsibleModule(argument_spec=arg_spec,
                           required_together=rax_required_together())

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    setup_rax_module(module, pyrax)

    params = module.params
    notification_plan(module,
                      params.get('state'),
                      params.get('label'),
                      params.get('critical_state'),
                      params.get('warning_state'),
                      params.get('ok_state'))
+
+# Import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+# Invoke the module.
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/serverless.py b/lib/ansible/modules/cloud/serverless.py
new file mode 100644
index 0000000000..a075a2b49b
--- /dev/null
+++ b/lib/ansible/modules/cloud/serverless.py
@@ -0,0 +1,191 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2016, Ryan Scott Brown <ryansb@redhat.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: serverless
+short_description: Manages a Serverless Framework project
+description:
+ - Provides support for managing Serverless Framework (https://serverless.com/) project deployments and stacks.
+version_added: "2.3"
+options:
+ state:
+ choices: ['present', 'absent']
+ description:
+ - Goal state of given stage/project
+ required: false
+ default: present
+ service_path:
+ description:
+ - The path to the root of the Serverless Service to be operated on.
+ required: true
+ functions:
+ description:
+ - A list of specific functions to deploy. If this is not provided, all functions in the service will be deployed.
+ required: false
+ default: []
+ region:
+ description:
+ - AWS region to deploy the service to
+ required: false
+ default: us-east-1
+ deploy:
+ description:
+ - Whether or not to deploy artifacts after building them. When this option is `false` all the functions will be built, but no stack update will be run to send them out. This is mostly useful for generating artifacts to be stored/deployed elsewhere.
+ required: false
+ default: true
+notes:
+ - Currently, the `serverless` command must be in the path of the node executing the task. In the future this may be a flag.
+requirements: [ "serverless" ]
+author: "Ryan Scott Brown @ryansb"
+'''
+
+EXAMPLES = """
+# Basic deploy of a service
+- serverless:
+ service_path: '{{ project_dir }}'
+ state: present
+
+# Deploy specific functions
+- serverless:
+ service_path: '{{ project_dir }}'
+ functions:
+ - my_func_one
+ - my_func_two
+
+# deploy a project, then pull its resource list back into Ansible
+- serverless:
+ stage: dev
+ region: us-east-1
+ service_path: '{{ project_dir }}'
+ register: sls
+# The cloudformation stack is always named the same as the full service, so the
+# cloudformation_facts module can get a full list of the stack resources, as
+# well as stack events and outputs
+- cloudformation_facts:
+ region: us-east-1
+ stack_name: '{{ sls.service_name }}'
+ stack_resources: true
+"""
+
+RETURN = """
+service_name:
+ type: string
+ description: The service name (service plus stage) of the deployed stack, as read from serverless.yml.
+ returned: always
+ sample: my-fancy-service-dev
+state:
+ type: string
+ description: Whether the stack for the serverless project is present/absent.
+ returned: always
+command:
+ type: string
+ description: Full `serverless` command run by this module, in case you want to re-run the command outside the module.
+ returned: always
+ sample: serverless deploy --stage production
+"""
+
+
+import os
+import traceback
+import yaml
+
+
def read_serverless_config(module):
    """Load and parse <service_path>/serverless.yml, failing the module on I/O errors."""
    path = os.path.expanduser(module.params.get('service_path'))
    config_file = os.path.join(path, 'serverless.yml')

    try:
        with open(config_file) as sls_config:
            return yaml.safe_load(sls_config.read())
    except IOError as e:
        module.fail_json(msg="Could not open serverless.yml in {}. err: {}".format(path, str(e)), exception=traceback.format_exc())

    # Defensive fallback; only reached if fail_json were ever to return.
    module.fail_json(msg="Failed to open serverless config at {}".format(
        os.path.join(path, 'serverless.yml')))
+
+
def get_service_name(module, stage):
    """Return the CloudFormation stack name '<service>-<stage>'.

    The stage comes from the `stage` argument when set, otherwise from the
    serverless.yml `stage` key, defaulting to 'dev'.
    """
    config = read_serverless_config(module)
    service = config.get('service')
    if service is None:
        module.fail_json(msg="Could not read `service` key from serverless.yml file")

    effective_stage = stage if stage else config.get('stage', 'dev')
    return "{}-{}".format(service, effective_stage)
+
+
def main():
    """Entry point: assemble the `serverless` CLI invocation and run it."""
    module = AnsibleModule(
        argument_spec=dict(
            service_path=dict(required=True),
            state=dict(default='present', choices=['present', 'absent'], required=False),
            functions=dict(type='list', required=False),
            region=dict(default='', required=False),
            stage=dict(default='', required=False),
            deploy=dict(default=True, type='bool', required=False),
        ),
    )

    service_path = os.path.expanduser(module.params.get('service_path'))
    state = module.params.get('state')
    region = module.params.get('region')
    stage = module.params.get('stage')
    deploy = module.params.get('deploy', True)
    # NOTE(review): `functions` is accepted but never appended to the command,
    # so every function in the service is always deployed — confirm intent.
    functions = module.params.get('functions')

    if state == 'present':
        action = 'deploy '
    elif state == 'absent':
        action = 'remove '
    else:
        # Unreachable in practice: the argument spec restricts `state`.
        module.fail_json(msg="State must either be 'present' or 'absent'. Received: {}".format(state))

    command = "serverless " + action
    if state == 'present' and not deploy:
        # Build artifacts only; skip the stack update.
        command += '--noDeploy '
    if region:
        command += '--region {} '.format(region)
    if stage:
        command += '--stage {} '.format(stage)

    rc, out, err = module.run_command(command, cwd=service_path)
    if rc != 0:
        # Removing an already-absent stage is treated as "no change".
        if state == 'absent' and "-{}' does not exist".format(stage) in out:
            module.exit_json(changed=False, state='absent', command=command,
                             out=out, service_name=get_service_name(module, stage))

        module.fail_json(msg="Failure when executing Serverless command. Exited {}.\nstdout: {}\nstderr: {}".format(rc, out, err))

    # gather some facts about the deployment
    module.exit_json(changed=True, state='present', out=out, command=command,
                     service_name=get_service_name(module, stage))
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/smartos/__init__.py b/lib/ansible/modules/cloud/smartos/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/cloud/smartos/__init__.py
diff --git a/lib/ansible/modules/cloud/smartos/smartos_image_facts.py b/lib/ansible/modules/cloud/smartos/smartos_image_facts.py
new file mode 100644
index 0000000000..487aa3f648
--- /dev/null
+++ b/lib/ansible/modules/cloud/smartos/smartos_image_facts.py
@@ -0,0 +1,123 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Adam Å tevko <adam.stevko@gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: smartos_image_facts
+short_description: Get SmartOS image details.
+description:
+ - Retrieve facts about all installed images on SmartOS. Facts will be
+ inserted to the ansible_facts key.
+version_added: "2.2"
+author: Adam Å tevko (@xen0l)
+options:
+ filters:
+ description:
+ - Criteria for selecting image. Can be any value from image
+ manifest and 'published_date', 'published', 'source', 'clones',
+ and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm)
+ under 'imgadm list'.
+ required: false
+ default: None
+'''
+
+EXAMPLES = '''
+# Return facts about all installed images.
+smartos_image_facts:
+
+# Return all private active Linux images.
+smartos_image_facts: filters="os=linux state=active public=false"
+
+# Show, how many clones does every image have.
+smartos_image_facts:
+
+debug: msg="{{ smartos_images[item]['name'] }}-{{smartos_images[item]['version'] }}
+ has {{ smartos_images[item]['clones'] }} VM(s)"
+with_items: "{{ smartos_images.keys() }}"
+'''
+
+RETURN = '''
+# this module returns ansible_facts
+'''
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+
class ImageFacts(object):
    """Collect facts about images installed on a SmartOS host via imgadm(1M)."""

    def __init__(self, module):
        self.module = module
        # Raw filter string, passed straight through to `imgadm list`.
        self.filters = module.params['filters']

    def return_all_installed_images(self):
        """Return {uuid: manifest} for every installed image.

        Each manifest dict is augmented with the 'clones', 'source' and
        'zpool' attributes reported alongside it by imgadm.
        """
        cmd = [self.module.get_bin_path('imgadm')]

        cmd.append('list')
        cmd.append('-j')

        if self.filters:
            cmd.append(self.filters)

        (rc, out, err) = self.module.run_command(cmd)

        if rc != 0:
            # Bug fix: report an actual failure. The original called
            # exit_json here, which made a failed imgadm run look successful.
            self.module.fail_json(
                msg='Failed to get all installed images', stderr=err)

        images = json.loads(out)

        result = {}
        for image in images:
            result[image['manifest']['uuid']] = image['manifest']
            # Merge additional attributes with the image manifest.
            for attrib in ['clones', 'source', 'zpool']:
                result[image['manifest']['uuid']][attrib] = image[attrib]

        return result
+
+
def main():
    """Gather installed SmartOS image facts and return them under ansible_facts."""
    module = AnsibleModule(
        argument_spec=dict(
            filters=dict(default=None),
        ),
        supports_check_mode=False,
    )

    facts = ImageFacts(module)
    module.exit_json(
        ansible_facts={'smartos_images': facts.return_all_installed_images()})
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/softlayer/__init__.py b/lib/ansible/modules/cloud/softlayer/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/cloud/softlayer/__init__.py
diff --git a/lib/ansible/modules/cloud/softlayer/sl_vm.py b/lib/ansible/modules/cloud/softlayer/sl_vm.py
new file mode 100644
index 0000000000..b24c0f06fa
--- /dev/null
+++ b/lib/ansible/modules/cloud/softlayer/sl_vm.py
@@ -0,0 +1,364 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: sl_vm
+short_description: create or cancel a virtual instance in SoftLayer
+description:
+ - Creates or cancels SoftLayer instances. When created, optionally waits for it to be 'running'.
+version_added: "2.1"
+options:
+ instance_id:
+ description:
+ - Instance Id of the virtual instance to perform action option
+ required: false
+ default: null
+ hostname:
+ description:
+ - Hostname to be provided to a virtual instance
+ required: false
+ default: null
+ domain:
+ description:
+ - Domain name to be provided to a virtual instance
+ required: false
+ default: null
+ datacenter:
+ description:
+ - Datacenter for the virtual instance to be deployed
+ required: false
+ default: null
+ tags:
+ description:
+ - Tag or list of tags to be provided to a virtual instance
+ required: false
+ default: null
+ hourly:
+ description:
+ - Flag to determine if the instance should be hourly billed
+ required: false
+ default: true
+ private:
+ description:
+ - Flag to determine if the instance should be private only
+ required: false
+ default: false
+ dedicated:
+ description:
+ - Flag to determine if the instance should be deployed in dedicated space
+ required: false
+ default: false
+ local_disk:
+ description:
+ - Flag to determine if local disk should be used for the new instance
+ required: false
+ default: true
+ cpus:
+ description:
+ - Count of cpus to be assigned to new virtual instance
+ required: true
+ default: null
+ memory:
+ description:
+ - Amount of memory to be assigned to new virtual instance
+ required: true
+ default: null
+ disks:
+ description:
+ - List of disk sizes to be assigned to new virtual instance
+ required: true
+ default: [25]
+ os_code:
+ description:
+ - OS Code to be used for new virtual instance
+ required: false
+ default: null
+ image_id:
+ description:
+ - Image Template to be used for new virtual instance
+ required: false
+ default: null
+ nic_speed:
+ description:
+ - NIC Speed to be assigned to new virtual instance
+ required: false
+ default: 10
+ public_vlan:
+ description:
+ - VLAN by its Id to be assigned to the public NIC
+ required: false
+ default: null
+ private_vlan:
+ description:
+ - VLAN by its Id to be assigned to the private NIC
+ required: false
+ default: null
+ ssh_keys:
+ description:
+ - List of ssh keys by their Id to be assigned to a virtual instance
+ required: false
+ default: null
+ post_uri:
+ description:
+ - URL of a post provisioning script to be loaded and executed on virtual instance
+ required: false
+ default: null
+ state:
+ description:
+ - Create, or cancel a virtual instance. Specify "present" for create, "absent" to cancel.
+ required: false
+ default: 'present'
+ wait:
+ description:
+ - Flag used to wait for active status before returning
+ required: false
+ default: true
+ wait_timeout:
+ description:
+ - time in seconds before wait returns
+ required: false
+ default: 600
+
+requirements:
+ - "python >= 2.6"
+ - "softlayer >= 4.1.1"
+author: "Matt Colton (@mcltn)"
+'''
+
+EXAMPLES = '''
+- name: Build instance
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Build instance request
+ local_action:
+ module: sl_vm
+ hostname: instance-1
+ domain: anydomain.com
+ datacenter: dal09
+ tags: ansible-module-test
+ hourly: True
+ private: False
+ dedicated: False
+ local_disk: True
+ cpus: 1
+ memory: 1024
+ disks: [25]
+ os_code: UBUNTU_LATEST
+ wait: False
+
+- name: Build additional instances
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Build instances request
+ local_action:
+ module: sl_vm
+ hostname: "{{ item.hostname }}"
+ domain: "{{ item.domain }}"
+ datacenter: "{{ item.datacenter }}"
+ tags: "{{ item.tags }}"
+ hourly: "{{ item.hourly }}"
+ private: "{{ item.private }}"
+ dedicated: "{{ item.dedicated }}"
+ local_disk: "{{ item.local_disk }}"
+ cpus: "{{ item.cpus }}"
+ memory: "{{ item.memory }}"
+ disks: "{{ item.disks }}"
+ os_code: "{{ item.os_code }}"
+ ssh_keys: "{{ item.ssh_keys }}"
+ wait: "{{ item.wait }}"
+ with_items:
+ - { hostname: 'instance-2', domain: 'anydomain.com', datacenter: 'dal09', tags: ['ansible-module-test', 'ansible-module-test-slaves'], hourly: True, private: False, dedicated: False, local_disk: True, cpus: 1, memory: 1024, disks: [25,100], os_code: 'UBUNTU_LATEST', ssh_keys: [], wait: True }
+ - { hostname: 'instance-3', domain: 'anydomain.com', datacenter: 'dal09', tags: ['ansible-module-test', 'ansible-module-test-slaves'], hourly: True, private: False, dedicated: False, local_disk: True, cpus: 1, memory: 1024, disks: [25,100], os_code: 'UBUNTU_LATEST', ssh_keys: [], wait: True }
+
+
+- name: Cancel instances
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: Cancel by tag
+ local_action:
+ module: sl_vm
+ state: absent
+ tags: ansible-module-test
+'''
+
+# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed.
+RETURN = '''# '''
+
+import time
+
+#TODO: get this info from API
+STATES = ['present', 'absent']
+DATACENTERS = ['ams01','ams03','che01','dal01','dal05','dal06','dal09','dal10','fra02','hkg02','hou02','lon02','mel01','mex01','mil01','mon01','osl01','par01','sjc01','sjc03','sao01','sea01','sng01','syd01','tok02','tor01','wdc01','wdc04']
+CPU_SIZES = [1,2,4,8,16,32,56]
+MEMORY_SIZES = [1024,2048,4096,6144,8192,12288,16384,32768,49152,65536,131072,247808]
+INITIALDISK_SIZES = [25,100]
+LOCALDISK_SIZES = [25,100,150,200,300]
+SANDISK_SIZES = [10,20,25,30,40,50,75,100,125,150,175,200,250,300,350,400,500,750,1000,1500,2000]
+NIC_SPEEDS = [10,100,1000]
+
+try:
+ import SoftLayer
+ from SoftLayer import VSManager
+
+ HAS_SL = True
+ vsManager = VSManager(SoftLayer.create_client_from_env())
+except ImportError:
+ HAS_SL = False
+
+
def create_virtual_instance(module):
    """Provision a SoftLayer virtual instance unless a matching one already exists.

    Returns (created, instance): created is False when an instance with the
    same hostname/domain/datacenter exists, when neither os_code nor image_id
    was supplied, or when the API did not return a usable instance record.
    """
    params = module.params

    existing = vsManager.list_instances(
        hostname=params.get('hostname'),
        domain=params.get('domain'),
        datacenter=params.get('datacenter'),
    )
    if existing:
        return False, None

    # OS code and image template are mutually exclusive; OS code wins.
    if params.get('os_code'):
        params['image_id'] = ''
    elif params.get('image_id'):
        params['os_code'] = ''
        params['disks'] = []  # Blank out disks since it will use the template
    else:
        return False, None

    tags = params.get('tags')
    if isinstance(tags, list):
        # The API expects a single comma-separated string.
        tags = ','.join(map(str, params.get('tags')))

    instance = vsManager.create_instance(
        hostname=params.get('hostname'),
        domain=params.get('domain'),
        cpus=params.get('cpus'),
        memory=params.get('memory'),
        hourly=params.get('hourly'),
        datacenter=params.get('datacenter'),
        os_code=params.get('os_code'),
        image_id=params.get('image_id'),
        local_disk=params.get('local_disk'),
        disks=params.get('disks'),
        ssh_keys=params.get('ssh_keys'),
        nic_speed=params.get('nic_speed'),
        private=params.get('private'),
        public_vlan=params.get('public_vlan'),
        private_vlan=params.get('private_vlan'),
        dedicated=params.get('dedicated'),
        post_uri=params.get('post_uri'),
        tags=tags)

    if instance is not None and instance['id'] > 0:
        return True, instance
    return False, None
+
+
def wait_for_instance(module, id):
    """Poll until instance `id` is ready or `wait_time` seconds elapse.

    :param module: AnsibleModule; `wait_time` (seconds) is read from params.
    :param id: SoftLayer instance id to poll.
    :returns: (completed, instance) — instance is the fetched record when
        ready, otherwise None on timeout.
    """
    instance = None
    completed = False
    wait_timeout = time.time() + module.params.get('wait_time')
    while not completed and wait_timeout > time.time():
        try:
            completed = vsManager.wait_for_ready(id, 10, 2)
            if completed:
                instance = vsManager.get_instance(id)
        except Exception:
            # Bug fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Transient API errors are not
            # fatal; keep polling until the deadline.
            completed = False

    return completed, instance
+
+
def cancel_instance(module):
    """Cancel instances selected by tags/hostname/domain, or one by instance_id.

    :returns: (canceled, None) — canceled is False when any cancellation call
        failed, or when no selector (instance_id, tags, hostname, domain)
        was supplied.
    """
    canceled = True
    if module.params.get('instance_id') == None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')):
        tags = module.params.get('tags')
        if isinstance(tags, basestring):
            # Normalize a single tag string into a one-element list.
            tags = [module.params.get('tags')]
        instances = vsManager.list_instances(tags = tags, hostname = module.params.get('hostname'), domain = module.params.get('domain'))
        for instance in instances:
            try:
                vsManager.cancel_instance(instance['id'])
            except Exception:
                # Narrowed from a bare except; keep cancelling the rest.
                canceled = False
    elif module.params.get('instance_id') and module.params.get('instance_id') != 0:
        try:
            # Bug fix: the original referenced an undefined `instance` here
            # (NameError at runtime); cancel the instance named by the
            # `instance_id` parameter instead.
            vsManager.cancel_instance(module.params.get('instance_id'))
        except Exception:
            canceled = False
    else:
        return False, None

    return canceled, None
+
+
def main():
    """Entry point: build the argument spec, then create or cancel the instance."""
    module = AnsibleModule(
        argument_spec=dict(
            instance_id=dict(),
            hostname=dict(),
            domain=dict(),
            datacenter=dict(choices=DATACENTERS),
            tags=dict(),
            hourly=dict(type='bool', default=True),
            private=dict(type='bool', default=False),
            dedicated=dict(type='bool', default=False),
            local_disk=dict(type='bool', default=True),
            cpus=dict(type='int', choices=CPU_SIZES),
            memory=dict(type='int', choices=MEMORY_SIZES),
            disks=dict(type='list', default=[25]),
            os_code=dict(),
            image_id=dict(),
            nic_speed=dict(type='int', choices=NIC_SPEEDS),
            public_vlan=dict(),
            private_vlan=dict(),
            ssh_keys=dict(type='list', default=[]),
            post_uri=dict(),
            state=dict(default='present', choices=STATES),
            wait=dict(type='bool', default=True),
            wait_time=dict(type='int', default=600),
        )
    )

    if not HAS_SL:
        module.fail_json(msg='softlayer python library required for this module')

    state = module.params.get('state')
    if state == 'absent':
        changed, instance = cancel_instance(module)
    elif state == 'present':
        changed, instance = create_virtual_instance(module)
        if module.params.get('wait') and instance:
            changed, instance = wait_for_instance(module, instance['id'])

    # Round-trip through JSON to strip non-serializable SoftLayer objects.
    serializable = json.loads(
        json.dumps(instance, default=lambda o: o.__dict__))
    module.exit_json(changed=changed, instance=serializable)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vca_fw.py b/lib/ansible/modules/cloud/vmware/vca_fw.py
new file mode 100644
index 0000000000..78cebbb012
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vca_fw.py
@@ -0,0 +1,249 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015 VMware, Inc. All Rights Reserved.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vca_fw
+short_description: add remove firewall rules in a gateway in a vca
+description:
+ - Adds or removes firewall rules from a gateway in a vca environment
+version_added: "2.0"
+author: Peter Sprygada (@privateip)
+options:
+ fw_rules:
+ description:
      - A list of firewall rules to be added to the gateway. Please see the examples for valid entries.
+ required: True
+ default: false
+extends_documentation_fragment: vca.documentation
+'''
+
+EXAMPLES = '''
+
+#Add a set of firewall rules
+
+- hosts: localhost
+ connection: local
+ tasks:
+ - vca_fw:
+ instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
+ vdc_name: 'benz_ansible'
+ state: 'absent'
+ fw_rules:
+ - description: "ben testing"
+ source_ip: "Any"
+ dest_ip: 192.0.2.23
+ - description: "ben testing 2"
+ source_ip: 192.0.2.50
+ source_port: "Any"
+ dest_port: "22"
+ dest_ip: 192.0.2.101
+ is_enable: "true"
+ enable_logging: "false"
+ protocol: "Tcp"
+ policy: "allow"
+
+'''
+
+try:
+ from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import FirewallRuleType
+ from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import ProtocolsType
+except ImportError:
+ # normally set a flag here but it will be caught when testing for
+ # the existence of pyvcloud (see module_utils/vca.py). This just
+ # protects against generating an exception at runtime
+ pass
+
+VALID_PROTO = ['Tcp', 'Udp', 'Icmp', 'Other', 'Any']
+VALID_RULE_KEYS = ['policy', 'is_enable', 'enable_logging', 'description',
+ 'dest_ip', 'dest_port', 'source_ip', 'source_port',
+ 'protocol']
+
def protocol_to_tuple(protocol):
    """Flatten a pyvcloud ProtocolsType into a fixed-order 5-tuple.

    Order is (Tcp, Udp, Icmp, Other, Any).
    """
    getters = (protocol.get_Tcp, protocol.get_Udp, protocol.get_Icmp,
               protocol.get_Other, protocol.get_Any)
    return tuple(getter() for getter in getters)
+
def protocol_to_string(protocol):
    """Return the name of the first protocol flag that is True.

    Accepts a ProtocolsType object and returns one of 'Tcp', 'Udp',
    'Icmp', 'Other' or 'Any' (None when no flag is set to True).
    """
    flags = protocol_to_tuple(protocol)
    for flag, name in zip(flags, ('Tcp', 'Udp', 'Icmp', 'Other', 'Any')):
        if flag is True:
            return name
+
def protocol_to_type(protocol):
    """Build a pyvcloud ProtocolsType with the named protocol flag set.

    Raises VcaError when setattr fails with AttributeError (i.e. the
    name is not a valid protocol attribute on ProtocolsType).
    """
    try:
        protocols = ProtocolsType()
        setattr(protocols, protocol, True)
        return protocols
    except AttributeError:
        raise VcaError("The value in protocol is not valid")
+
def validate_fw_rules(fw_rules):
    """Normalise a list of firewall-rule dicts in place and return it.

    Unknown keys raise VcaError.  Missing fields receive defaults; IPs,
    ports, protocol and policy are lower-cased (ports coerced via str()).
    """
    for rule in fw_rules:
        for key in rule:
            if key not in VALID_RULE_KEYS:
                raise VcaError("%s is not a valid key in fw rules, please "
                               "check above.." % key, valid_keys=VALID_RULE_KEYS)

        for field in ('dest_port', 'source_port'):
            rule[field] = str(rule.get(field, 'Any')).lower()
        for field in ('dest_ip', 'source_ip', 'protocol'):
            rule[field] = rule.get(field, 'Any').lower()
        rule['policy'] = rule.get('policy', 'allow').lower()
        rule['is_enable'] = rule.get('is_enable', True)
        rule['enable_logging'] = rule.get('enable_logging', False)
        rule['description'] = rule.get('description', 'rule added by Ansible')

    return fw_rules
+
def fw_rules_to_dict(rules):
    """Convert pyvcloud FirewallRuleType objects to plain dicts.

    Keys and lower-cased values mirror the output of validate_fw_rules()
    so current and desired rules can be compared directly.
    """
    fw_rules = list()
    for rule in rules:
        fw_rules.append(
            dict(
                dest_port=rule.get_DestinationPortRange().lower(),
                # was .lower().lower(); the second call was redundant
                dest_ip=rule.get_DestinationIp().lower(),
                source_port=rule.get_SourcePortRange().lower(),
                source_ip=rule.get_SourceIp().lower(),
                protocol=protocol_to_string(rule.get_Protocols()).lower(),
                policy=rule.get_Policy().lower(),
                is_enable=rule.get_IsEnabled(),
                enable_logging=rule.get_EnableLogging(),
                description=rule.get_Description()
            )
        )
    return fw_rules
+
def create_fw_rule(is_enable, description, policy, protocol, dest_port,
                   dest_ip, source_port, source_ip, enable_logging):
    """Build a pyvcloud FirewallRuleType from plain rule fields.

    *protocol* is a protocol name (e.g. 'Tcp') converted to a
    ProtocolsType via protocol_to_type(); other values pass through.
    """
    return FirewallRuleType(IsEnabled=is_enable,
                            Description=description,
                            Policy=policy,
                            Protocols=protocol_to_type(protocol),
                            DestinationPortRange=dest_port,
                            DestinationIp=dest_ip,
                            SourcePortRange=source_port,
                            SourceIp=source_ip,
                            EnableLogging=enable_logging)
+
def main():
    """Ansible entry point: reconcile gateway firewall rules with fw_rules.

    Compares the validated desired rules against the gateway's current
    rules position by position: differing positions are replaced, extra
    desired rules are added and surplus current rules are deleted.
    """
    argument_spec = vca_argument_spec()
    argument_spec.update(
        dict(
            fw_rules = dict(required=True, type='list'),
            gateway_name = dict(default='gateway'),
            state = dict(default='present', choices=['present', 'absent'])
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    fw_rules = module.params.get('fw_rules')
    gateway_name = module.params.get('gateway_name')
    vdc_name = module.params['vdc_name']

    vca = vca_login(module)

    gateway = vca.get_gateway(vdc_name, gateway_name)
    if not gateway:
        module.fail_json(msg="Not able to find the gateway %s, please check "
                             "the gateway_name param" % gateway_name)

    fwservice = gateway._getFirewallService()

    rules = gateway.get_fw_rules()
    current_rules = fw_rules_to_dict(rules)

    try:
        desired_rules = validate_fw_rules(fw_rules)
    except VcaError as e:
        module.fail_json(msg=e.message)

    result = dict(changed=False)
    result['current_rules'] = current_rules
    result['desired_rules'] = desired_rules

    updates = list()
    additions = list()
    deletions = list()

    # Positional diff: same index but different content -> update;
    # desired rules beyond the end of current_rules -> addition.
    for (index, rule) in enumerate(desired_rules):
        try:
            if rule != current_rules[index]:
                updates.append((index, rule))
        except IndexError:
            additions.append(rule)

    # Surplus current rules (past the end of the desired list) must go.
    # BUGFIX: the original referenced an undefined name ('eos') here and
    # raised NameError whenever there were rules to delete.
    if len(current_rules) > len(desired_rules):
        deletions.extend(current_rules[len(desired_rules):])

    for rule in additions:
        if not module.check_mode:
            # pyvcloud expects the protocol capitalised, e.g. 'Tcp'.
            rule['protocol'] = rule['protocol'].capitalize()
            gateway.add_fw_rule(**rule)
            result['changed'] = True

    for index, rule in updates:
        if not module.check_mode:
            rule = create_fw_rule(**rule)
            fwservice.replace_FirewallRule_at(index, rule)
            result['changed'] = True

    keys = ['protocol', 'dest_port', 'dest_ip', 'source_port', 'source_ip']
    for rule in deletions:
        if not module.check_mode:
            kwargs = dict([(k, v) for k, v in rule.items() if k in keys])
            # BUGFIX: 'protocol' is already a lower-case string here (see
            # fw_rules_to_dict); the original passed it to
            # protocol_to_string(), which expects a ProtocolsType object
            # and would raise AttributeError.  delete_fw_rule presumably
            # wants the capitalised form, as add_fw_rule does -- TODO
            # confirm against the pyvcloud Gateway API.
            kwargs['protocol'] = kwargs['protocol'].capitalize()
            gateway.delete_fw_rule(**kwargs)
            result['changed'] = True

    # Commit once at the end if anything changed.
    if not module.check_mode and result['changed']:
        task = gateway.save_services_configuration()
        if task:
            vca.block_until_completed(task)

    # Plain counter assignments (the original used confusing chained
    # "result[...] = count=len(...)" assignments).
    result['rules_updated'] = len(updates)
    result['rules_added'] = len(additions)
    result['rules_deleted'] = len(deletions)

    return module.exit_json(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.vca import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vca_nat.py b/lib/ansible/modules/cloud/vmware/vca_nat.py
new file mode 100644
index 0000000000..64771da692
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vca_nat.py
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015 VMware, Inc. All Rights Reserved.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vca_nat
+short_description: add remove nat rules in a gateway in a vca
+description:
+ - Adds or removes nat rules from a gateway in a vca environment
+version_added: "2.0"
+author: Peter Sprygada (@privateip)
+options:
+ purge_rules:
+ description:
      - If set to true, it will delete all rules in the gateway that are not passed as parameters to this module.
+ required: false
+ default: false
+ nat_rules:
+ description:
      - A list of rules to be added to the gateway. Please see the examples for valid entries.
+ required: True
+ default: false
+extends_documentation_fragment: vca.documentation
+'''
+
+EXAMPLES = '''
+
+#An example for a source nat
+
+- hosts: localhost
+ connection: local
+ tasks:
+ - vca_nat:
+ instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
+ vdc_name: 'benz_ansible'
+ state: 'present'
+ nat_rules:
+ - rule_type: SNAT
+ original_ip: 192.0.2.42
+ translated_ip: 203.0.113.23
+
+#example for a DNAT
+- hosts: localhost
+ connection: local
+ tasks:
+ - vca_nat:
+ instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
+ vdc_name: 'benz_ansible'
+ state: 'present'
+ nat_rules:
+ - rule_type: DNAT
+ original_ip: 203.0.113.23
+ original_port: 22
+ translated_ip: 192.0.2.42
+ translated_port: 22
+
+'''
+
+import time
+import xmltodict
+
+VALID_RULE_KEYS = ['rule_type', 'original_ip', 'original_port',
+ 'translated_ip', 'translated_port', 'protocol']
+
+
def validate_nat_rules(nat_rules):
    """Normalise a list of NAT rule dicts in place and return the list.

    Every rule must be a dict using only VALID_RULE_KEYS; a VcaError is
    raised otherwise.  Missing fields receive defaults and all values
    are lower-cased strings (ports coerced via str()).
    """
    for rule in nat_rules:
        if not isinstance(rule, dict):
            raise VcaError("nat rules must be a list of dictionaries, "
                           "Please check", valid_keys=VALID_RULE_KEYS)

        for key in rule:
            if key not in VALID_RULE_KEYS:
                raise VcaError("%s is not a valid key in nat rules, please "
                               "check above.." % key, valid_keys=VALID_RULE_KEYS)

        for field in ('original_port', 'translated_port'):
            rule[field] = str(rule.get(field, 'any')).lower()
        for field in ('original_ip', 'translated_ip', 'protocol'):
            rule[field] = rule.get(field, 'any').lower()
        rule['rule_type'] = rule.get('rule_type', 'DNAT').lower()

    return nat_rules
+
+
def nat_rules_to_dict(nat_rules):
    """Convert pyvcloud NAT rule objects to plain, lower-cased dicts.

    Falsy port/protocol values are normalised to 'any' so the result is
    directly comparable with the output of validate_nat_rules().
    NOTE(review): assumes the get_*Port()/get_Protocol() getters return
    strings ('' when unset) -- confirm against pyvcloud.
    """
    result = []
    for rule in nat_rules:
        gw_rule = rule.get_GatewayNatRule()
        result.append(
            dict(
                rule_type=rule.get_RuleType().lower(),
                original_ip=gw_rule.get_OriginalIp().lower(),
                original_port=(gw_rule.get_OriginalPort().lower() or 'any'),
                translated_ip=gw_rule.get_TranslatedIp().lower(),
                translated_port=(gw_rule.get_TranslatedPort().lower() or 'any'),
                protocol=(gw_rule.get_Protocol().lower() or 'any')
            )
        )
    return result
+
def rule_to_string(rule):
    """Render a rule dict as 'key=value, key=value' for display."""
    strings = list()
    for key, value in rule.items():
        strings.append('%s=%s' % (key, value))
    # BUGFIX: the original joined an undefined name 'string', raising
    # NameError whenever this helper was called.
    return ', '.join(strings)
+
def main():
    """Ansible entry point: reconcile gateway NAT rules with nat_rules.

    Optionally purges all existing rules first, then adds desired rules
    that are missing and deletes current rules that are not desired.
    """
    argument_spec = vca_argument_spec()
    argument_spec.update(
        dict(
            nat_rules = dict(type='list', default=[]),
            gateway_name = dict(default='gateway'),
            purge_rules = dict(default=False, type='bool'),
            state = dict(default='present', choices=['present', 'absent'])
        )
    )

    module = AnsibleModule(argument_spec, supports_check_mode=True)

    vdc_name = module.params.get('vdc_name')
    state = module.params['state']
    nat_rules = module.params['nat_rules']
    gateway_name = module.params['gateway_name']
    purge_rules = module.params['purge_rules']

    # Nothing to do unless the caller wants rules added or purged.
    if not purge_rules and not nat_rules:
        module.fail_json(msg='Must define purge_rules or nat_rules')

    vca = vca_login(module)

    gateway = vca.get_gateway(vdc_name, gateway_name)
    if not gateway:
        module.fail_json(msg="Not able to find the gateway %s, please check "
                             "the gateway_name param" % gateway_name)

    try:
        desired_rules = validate_nat_rules(nat_rules)
    except VcaError as e:
        module.fail_json(msg=e.message)

    rules = gateway.get_nat_rules()

    result = dict(changed=False, rules_purged=0)

    deletions = 0
    additions = 0

    # Purge first so the reconciliation below starts from a clean slate;
    # re-read the (now empty) rule list afterwards.
    if purge_rules is True and len(rules) > 0:
        result['rules_purged'] = len(rules)
        deletions = result['rules_purged']
        rules = list()
        if not module.check_mode:
            gateway.del_all_nat_rules()
            task = gateway.save_services_configuration()
            vca.block_until_completed(task)
            rules = gateway.get_nat_rules()
        result['changed'] = True

    current_rules = nat_rules_to_dict(rules)

    result['current_rules'] = current_rules
    result['desired_rules'] = desired_rules

    # Add desired rules that are not present yet.
    for rule in desired_rules:
        if rule not in current_rules:
            additions += 1
            if not module.check_mode:
                gateway.add_nat_rule(**rule)
                result['changed'] = True
    result['rules_added'] = additions

    # Delete current rules that are no longer desired.
    result['delete_rule'] = list()
    result['delete_rule_rc'] = list()
    for rule in current_rules:
        if rule not in desired_rules:
            deletions += 1
            if not module.check_mode:
                result['delete_rule'].append(rule)
                rc = gateway.del_nat_rule(**rule)
                result['delete_rule_rc'].append(rc)
                result['changed'] = True
    result['rules_deleted'] = deletions

    # Commit once at the end if anything was added or deleted.
    if not module.check_mode and (additions > 0 or deletions > 0):
        task = gateway.save_services_configuration()
        vca.block_until_completed(task)

    module.exit_json(**result)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.vca import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vca_vapp.py b/lib/ansible/modules/cloud/vmware/vca_vapp.py
new file mode 100644
index 0000000000..4ebdda24d6
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vca_vapp.py
@@ -0,0 +1,286 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015 Ansible, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vca_vapp
+short_description: Manages vCloud Air vApp instances.
+description:
  - This module will actively manage vCloud Air vApp instances. Instances
+ can be created and deleted as well as both deployed and undeployed.
+version_added: "2.0"
+author: Peter Sprygada (@privateip)
+options:
+ vapp_name:
+ description:
+ - The name of the vCloud Air vApp instance
+ required: yes
+ template_name:
+ description:
+ - The name of the vApp template to use to create the vApp instance. If
+ the I(state) is not `absent` then the I(template_name) value must be
+ provided. The I(template_name) must be previously uploaded to the
+ catalog specified by I(catalog_name)
+ required: no
+ default: None
+ network_name:
+ description:
+ - The name of the network that should be attached to the virtual machine
+ in the vApp. The virtual network specified must already be created in
+ the vCloud Air VDC. If the I(state) is not 'absent' then the
+ I(network_name) argument must be provided.
+ required: no
+ default: None
+ network_mode:
+ description:
+ - Configures the mode of the network connection.
+ required: no
+ default: pool
+ choices: ['pool', 'dhcp', 'static']
+ vm_name:
+ description:
+ - The name of the virtual machine instance in the vApp to manage.
+ required: no
+ default: None
+ vm_cpus:
+ description:
+ - The number of vCPUs to configure for the VM in the vApp. If the
+ I(vm_name) argument is provided, then this becomes a per VM setting
+ otherwise it is applied to all VMs in the vApp.
+ required: no
+ default: None
+ vm_memory:
+ description:
+ - The amount of memory in MB to allocate to VMs in the vApp. If the
+ I(vm_name) argument is provided, then this becomes a per VM setting
      otherwise it is applied to all VMs in the vApp.
+ required: no
+ default: None
+ operation:
+ description:
+ - Specifies an operation to be performed on the vApp.
+ required: no
+ default: noop
+ choices: ['noop', 'poweron', 'poweroff', 'suspend', 'shutdown', 'reboot', 'reset']
+ state:
+ description:
+ - Configures the state of the vApp.
+ required: no
+ default: present
+ choices: ['present', 'absent', 'deployed', 'undeployed']
+ username:
+ description:
+ - The vCloud Air username to use during authentication
+ required: false
+ default: None
+ password:
+ description:
+ - The vCloud Air password to use during authentication
+ required: false
+ default: None
+ org:
+ description:
+ - The org to login to for creating vapp, mostly set when the service_type is vdc.
+ required: false
+ default: None
+ instance_id:
+ description:
+ - The instance id in a vchs environment to be used for creating the vapp
+ required: false
+ default: None
+ host:
+ description:
+ - The authentication host to be used when service type is vcd.
+ required: false
+ default: None
+ api_version:
+ description:
+ - The api version to be used with the vca
+ required: false
+ default: "5.7"
+ service_type:
+ description:
+ - The type of service we are authenticating against
+ required: false
+ default: vca
+ choices: [ "vca", "vchs", "vcd" ]
+ vdc_name:
+ description:
+ - The name of the virtual data center (VDC) where the vm should be created or contains the vAPP.
+ required: false
+ default: None
+'''
+
+EXAMPLES = '''
+
+- name: Creates a new vApp in a VCA instance
+ vca_vapp:
+ vapp_name: tower
+ state=present
+ template_name='Ubuntu Server 12.04 LTS (amd64 20150127)'
+ vdc_name=VDC1
+ instance_id=<your instance id here>
+ username=<your username here>
+ password=<your password here>
+
+'''
+
+DEFAULT_VAPP_OPERATION = 'noop'
+
+VAPP_STATUS = {
+ 'Powered off': 'poweroff',
+ 'Powered on': 'poweron',
+ 'Suspended': 'suspend'
+}
+
+VAPP_STATES = ['present', 'absent', 'deployed', 'undeployed']
+VAPP_OPERATIONS = ['poweron', 'poweroff', 'suspend', 'shutdown',
+ 'reboot', 'reset', 'noop']
+
+
def get_instance(module):
    """Return the vApp's current state/status as a dict.

    Always contains 'vapp_name' and 'state' ('absent' when the vApp
    does not exist or lookup raises VcaError); 'status' is added when
    the vApp exists.
    """
    vapp_name = module.params['vapp_name']
    inst = dict(vapp_name=vapp_name, state='absent')
    try:
        vapp = module.get_vapp(vapp_name)
        if vapp:
            status = module.vca.get_status(vapp.me.get_status())
            inst['status'] = VAPP_STATUS.get(status, 'unknown')
            inst['state'] = 'deployed' if vapp.me.deployed else 'undeployed'
        return inst
    except VcaError:
        # Treat lookup failures as "vApp absent".
        return inst
+
def create(module):
    """Create the vApp from a catalog template and wait for completion.

    Reads all creation parameters from module.params and blocks until
    the vCloud task finishes.
    """
    vdc_name = module.params['vdc_name']
    vapp_name = module.params['vapp_name']
    template_name = module.params['template_name']
    catalog_name = module.params['catalog_name']
    network_name = module.params['network_name']
    network_mode = module.params['network_mode']
    vm_name = module.params['vm_name']
    vm_cpus = module.params['vm_cpus']
    vm_memory = module.params['vm_memory']
    # BUGFIX: valid states are 'present'/'absent'/'deployed'/'undeployed'
    # (VAPP_STATES); the original compared against 'deploy', which can
    # never match, so state=deployed never requested deployment here.
    deploy = module.params['state'] == 'deployed'
    poweron = module.params['operation'] == 'poweron'

    task = module.vca.create_vapp(vdc_name, vapp_name, template_name,
                                  catalog_name, network_name, network_mode,
                                  vm_name, vm_cpus, vm_memory, deploy, poweron)

    module.vca.block_until_completed(task)
+
def delete(module):
    """Delete the named vApp from its VDC."""
    module.vca.delete_vapp(module.params['vdc_name'],
                           module.params['vapp_name'])
+
def do_operation(module):
    """Execute a power operation against the vApp (or one VM in it).

    Maps the module's lower-case operation names onto the vCloud API's
    camel-case action names before issuing the 'power:<action>' call.
    """
    vapp_name = module.params['vapp_name']
    operation = module.params['operation']

    vm = None
    vm_name = module.params.get('vm_name')
    if vm_name:
        vm = module.get_vm(vapp_name, vm_name)

    # vCloud expects camel-case power actions.
    operation = {'poweron': 'powerOn', 'poweroff': 'powerOff'}.get(operation, operation)

    cmd = 'power:%s' % operation
    module.get_vapp(vapp_name).execute(cmd, 'post', targetVM=vm)
+
def set_state(module):
    """Deploy or undeploy the vApp to match the requested state.

    For 'deployed', power-on is requested when operation == 'poweron'.
    For 'undeployed', the undeploy action is 'powerOff', 'suspend', or
    None (default undeploy) depending on the operation parameter.
    """
    state = module.params['state']
    vapp = module.get_vapp(module.params['vapp_name'])
    if state == 'deployed':
        # deploy() takes a boolean power-on flag.
        action = module.params['operation'] == 'poweron'
        if not vapp.deploy(action):
            module.fail('unable to deploy vapp')
    elif state == 'undeployed':
        action = module.params['operation']
        if action == 'poweroff':
            action = 'powerOff'
        elif action != 'suspend':
            action = None
        if not vapp.undeploy(action):
            module.fail('unable to undeploy vapp')
+
+
def main():
    """Ansible entry point: converge the vApp to the requested state.

    Creates/deletes the vApp, adjusts deployed/undeployed state, then
    applies any power operation that differs from the current status.
    """
    argument_spec = dict(
        vapp_name=dict(required=True),
        vdc_name=dict(required=True),
        template_name=dict(),
        catalog_name=dict(default='Public Catalog'),
        network_name=dict(),
        network_mode=dict(default='pool', choices=['dhcp', 'static', 'pool']),
        vm_name=dict(),
        vm_cpus=dict(),
        vm_memory=dict(),
        operation=dict(default=DEFAULT_VAPP_OPERATION, choices=VAPP_OPERATIONS),
        state=dict(default='present', choices=VAPP_STATES)
    )

    module = VcaAnsibleModule(argument_spec=argument_spec,
                              supports_check_mode=True)

    state = module.params['state']
    operation = module.params['operation']

    instance = get_instance(module)

    result = dict(changed=False)

    if instance and state == 'absent':
        if not module.check_mode:
            delete(module)
        result['changed'] = True

    elif state != 'absent':
        if instance['state'] == 'absent':
            if not module.check_mode:
                create(module)
            result['changed'] = True

        elif instance['state'] != state and state != 'present':
            if not module.check_mode:
                set_state(module)
            result['changed'] = True

        # Apply a power operation only when it differs from the vApp's
        # current status and is not the 'noop' default.
        if operation != instance.get('status') and operation != 'noop':
            if not module.check_mode:
                do_operation(module)
            result['changed'] = True

    return module.exit(**result)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.vca import *
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_cluster.py b/lib/ansible/modules/cloud/vmware/vmware_cluster.py
new file mode 100644
index 0000000000..5fd986d52b
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_cluster.py
@@ -0,0 +1,255 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_cluster
+short_description: Create VMware vSphere Cluster
+description:
+ - Create VMware vSphere Cluster
+version_added: 2.0
+author: Joseph Callen (@jcpowermac)
notes:
    - Tested on ESXi 5.5
requirements:
    - PyVmomi installed
+options:
+ datacenter_name:
+ description:
+ - The name of the datacenter the cluster will be created in.
+ required: True
+ cluster_name:
+ description:
+ - The name of the cluster that will be created
+ required: True
+ enable_ha:
+ description:
+ - If set to True will enable HA when the cluster is created.
+ required: False
+ default: False
+ enable_drs:
+ description:
+ - If set to True will enable DRS when the cluster is created.
+ required: False
+ default: False
+ enable_vsan:
+ description:
+ - If set to True will enable vSAN when the cluster is created.
+ required: False
+ default: False
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example vmware_cluster command from Ansible Playbooks
+- name: Create Cluster
+ local_action: >
+ vmware_cluster
+ hostname="{{ ansible_ssh_host }}" username=root password=vmware
+ datacenter_name="datacenter"
+ cluster_name="cluster"
+ enable_ha=True
+ enable_drs=True
+ enable_vsan=True
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
class VMwareCluster(object):
    """Create, reconfigure or destroy a vSphere cluster.

    The desired configuration (HA/DRS/vSAN flags, cluster and datacenter
    names) is read from the Ansible module parameters; process_state()
    drives the work and exits the module.
    """

    def __init__(self, module):
        self.module = module
        self.enable_ha = module.params['enable_ha']
        self.enable_drs = module.params['enable_drs']
        self.enable_vsan = module.params['enable_vsan']
        self.cluster_name = module.params['cluster_name']
        self.desired_state = module.params['state']
        self.datacenter = None
        self.cluster = None
        self.content = connect_to_api(module)
        self.datacenter_name = module.params['datacenter_name']

    def process_state(self):
        """Look up the current state and run the matching transition."""
        cluster_states = {
            'absent': {
                'present': self.state_destroy_cluster,
                'absent': self.state_exit_unchanged,
            },
            'present': {
                'update': self.state_update_cluster,
                'present': self.state_exit_unchanged,
                'absent': self.state_create_cluster,
            }
        }
        current_state = self.check_cluster_configuration()
        # Based on the desired_state and the current_state call
        # the appropriate method from the dictionary
        cluster_states[self.desired_state][current_state]()

    def configure_ha(self):
        """Return a DasConfigInfo spec reflecting enable_ha."""
        das_config = vim.cluster.DasConfigInfo()
        das_config.enabled = self.enable_ha
        das_config.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy()
        das_config.admissionControlPolicy.failoverLevel = 2
        return das_config

    def configure_drs(self):
        """Return a DrsConfigInfo spec reflecting enable_drs."""
        drs_config = vim.cluster.DrsConfigInfo()
        drs_config.enabled = self.enable_drs
        # Set to partially automated
        drs_config.vmotionRate = 3
        return drs_config

    def configure_vsan(self):
        """Return a vSAN ConfigInfo spec reflecting enable_vsan."""
        vsan_config = vim.vsan.cluster.ConfigInfo()
        vsan_config.enabled = self.enable_vsan
        vsan_config.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo()
        vsan_config.defaultConfig.autoClaimStorage = False
        return vsan_config

    def state_create_cluster(self):
        """Create the cluster with the requested HA/DRS/vSAN settings."""
        try:
            cluster_config_spec = vim.cluster.ConfigSpecEx()
            cluster_config_spec.dasConfig = self.configure_ha()
            cluster_config_spec.drsConfig = self.configure_drs()
            if self.enable_vsan:
                cluster_config_spec.vsanConfig = self.configure_vsan()
            if not self.module.check_mode:
                self.datacenter.hostFolder.CreateClusterEx(self.cluster_name, cluster_config_spec)
            self.module.exit_json(changed=True)
        except vim.fault.DuplicateName:
            self.module.fail_json(msg="A cluster with the name %s already exists" % self.cluster_name)
        except vmodl.fault.InvalidArgument:
            self.module.fail_json(msg="Cluster configuration specification parameter is invalid")
        except vim.fault.InvalidName:
            self.module.fail_json(msg="%s is an invalid name for a cluster" % self.cluster_name)
        except vmodl.fault.NotSupported:
            # This should never happen
            self.module.fail_json(msg="Trying to create a cluster on an incorrect folder object")
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            # This should never happen either
            self.module.fail_json(msg=method_fault.msg)

    def state_destroy_cluster(self):
        """Destroy the existing cluster and wait for the task."""
        changed = True
        result = None

        try:
            if not self.module.check_mode:
                task = self.cluster.Destroy_Task()
                changed, result = wait_for_task(task)
            self.module.exit_json(changed=changed, result=result)
        except vim.fault.VimFault as vim_fault:
            self.module.fail_json(msg=vim_fault.msg)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)

    def state_exit_unchanged(self):
        """Report that no change was required."""
        self.module.exit_json(changed=False)

    def state_update_cluster(self):
        """Reconfigure the cluster where flags differ from the request."""
        cluster_config_spec = vim.cluster.ConfigSpecEx()
        changed = True
        result = None

        # Only include the sub-specs whose enabled flag actually differs.
        if self.cluster.configurationEx.dasConfig.enabled != self.enable_ha:
            cluster_config_spec.dasConfig = self.configure_ha()
        if self.cluster.configurationEx.drsConfig.enabled != self.enable_drs:
            cluster_config_spec.drsConfig = self.configure_drs()
        if self.cluster.configurationEx.vsanConfigInfo.enabled != self.enable_vsan:
            cluster_config_spec.vsanConfig = self.configure_vsan()

        try:
            if not self.module.check_mode:
                task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
                changed, result = wait_for_task(task)
            self.module.exit_json(changed=changed, result=result)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except TaskError as task_e:
            self.module.fail_json(msg=str(task_e))

    def check_cluster_configuration(self):
        """Return 'absent', 'present' or 'update' for the named cluster."""
        try:
            self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
            if self.datacenter is None:
                self.module.fail_json(msg="Datacenter %s does not exist, "
                                          "please create first with Ansible Module vmware_datacenter or manually."
                                          % self.datacenter_name)
            self.cluster = find_cluster_by_name_datacenter(self.datacenter, self.cluster_name)

            if self.cluster is None:
                return 'absent'
            else:
                desired_state = (self.enable_ha,
                                 self.enable_drs,
                                 self.enable_vsan)

                current_state = (self.cluster.configurationEx.dasConfig.enabled,
                                 self.cluster.configurationEx.drsConfig.enabled,
                                 self.cluster.configurationEx.vsanConfigInfo.enabled)

                # BUGFIX: cmp() was removed in Python 3; direct tuple
                # comparison is equivalent to "cmp(a, b) != 0" here.
                if desired_state != current_state:
                    return 'update'
                else:
                    return 'present'
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
+
+
def main():
    """Ansible entry point: build the spec and delegate to VMwareCluster."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(datacenter_name=dict(required=True, type='str'),
                              cluster_name=dict(required=True, type='str'),
                              enable_ha=dict(default=False, required=False, type='bool'),
                              enable_drs=dict(default=False, required=False, type='bool'),
                              enable_vsan=dict(default=False, required=False, type='bool'),
                              state=dict(default='present', choices=['present', 'absent'], type='str')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # pyVmomi is required for all vCenter API calls.
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    vmware_cluster = VMwareCluster(module)
    vmware_cluster.process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_datacenter.py b/lib/ansible/modules/cloud/vmware/vmware_datacenter.py
new file mode 100644
index 0000000000..fb60f2c9f5
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_datacenter.py
@@ -0,0 +1,164 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_datacenter
+short_description: Manage VMware vSphere Datacenters
+description:
+ - Manage VMware vSphere Datacenters
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac), Kamil Szczygiel (@kamsz)"
+notes:
+ - Tested on vSphere 6.0
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ hostname:
+ description:
+ - The hostname or IP address of the vSphere vCenter API server
+ required: True
+ username:
+ description:
+ - The username of the vSphere vCenter
+ required: True
+ aliases: ['user', 'admin']
+ password:
+ description:
+ - The password of the vSphere vCenter
+ required: True
+ aliases: ['pass', 'pwd']
+ datacenter_name:
+ description:
+ - The name of the datacenter the cluster will be created in.
+ required: True
+ state:
+ description:
+ - If the datacenter should be present or absent
+ choices: ['present', 'absent']
+ default: present
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example vmware_datacenter command from Ansible Playbooks
+- name: Create Datacenter
+ local_action: >
+ vmware_datacenter
+ hostname="{{ ansible_ssh_host }}" username=root password=vmware
+ datacenter_name="datacenter" state=present
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+def get_datacenter(context, module):
+    """Look up the datacenter named in the module params.
+
+    Returns the datacenter managed object, or None when no datacenter
+    with that name exists.  vSphere API faults abort the module run.
+    """
+    try:
+        datacenter_name = module.params.get('datacenter_name')
+        datacenter = find_datacenter_by_name(context, datacenter_name)
+        return datacenter
+    except vmodl.RuntimeFault as runtime_fault:
+        module.fail_json(msg=runtime_fault.msg)
+    except vmodl.MethodFault as method_fault:
+        module.fail_json(msg=method_fault.msg)
+
+
+def create_datacenter(context, module):
+ datacenter_name = module.params.get('datacenter_name')
+ folder = context.rootFolder
+
+ try:
+ datacenter = get_datacenter(context, module)
+ changed = False
+ if not datacenter:
+ changed = True
+ if not module.check_mode:
+ folder.CreateDatacenter(name=datacenter_name)
+ module.exit_json(changed=changed)
+ except vim.fault.DuplicateName:
+ module.fail_json(msg="A datacenter with the name %s already exists" % datacenter_name)
+ except vim.fault.InvalidName:
+ module.fail_json(msg="%s is an invalid name for a cluster" % datacenter_name)
+ except vmodl.fault.NotSupported:
+ # This should never happen
+ module.fail_json(msg="Trying to create a datacenter on an incorrect folder object")
+ except vmodl.RuntimeFault as runtime_fault:
+ module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ module.fail_json(msg=method_fault.msg)
+
+
+def destroy_datacenter(context, module):
+    """Destroy the datacenter if it exists.
+
+    Exits the module with changed=True when a datacenter was (or, in
+    check mode, would have been) destroyed, changed=False otherwise.
+    """
+    result = None
+
+    try:
+        datacenter = get_datacenter(context, module)
+        changed = False
+        if datacenter:
+            changed = True
+            if not module.check_mode:
+                task = datacenter.Destroy_Task()
+                # wait_for_task returns (success flag, task result).
+                changed, result = wait_for_task(task)
+        module.exit_json(changed=changed, result=result)
+    except vim.fault.VimFault as vim_fault:
+        module.fail_json(msg=vim_fault.msg)
+    except vmodl.RuntimeFault as runtime_fault:
+        module.fail_json(msg=runtime_fault.msg)
+    except vmodl.MethodFault as method_fault:
+        module.fail_json(msg=method_fault.msg)
+
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(
+ dict(
+ datacenter_name=dict(required=True, type='str'),
+ state=dict(default='present', choices=['present', 'absent'], type='str')
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ context = connect_to_api(module)
+ state = module.params.get('state')
+
+ if state == 'present':
+ create_datacenter(context, module)
+
+ if state == 'absent':
+ destroy_datacenter(context, module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.vmware import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_dns_config.py b/lib/ansible/modules/cloud/vmware/vmware_dns_config.py
new file mode 100644
index 0000000000..4faa8b6e29
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_dns_config.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_dns_config
+short_description: Manage VMware ESXi DNS Configuration
+description:
+ - Manage VMware ESXi DNS Configuration
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ change_hostname_to:
+ description:
+ - The hostname that an ESXi host should be changed to.
+ required: True
+ domainname:
+ description:
+      - The domain the ESXi host should be a part of.
+ required: True
+ dns_servers:
+ description:
+ - The DNS servers that the host should be configured to use.
+ required: True
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example vmware_dns_config command from Ansible Playbooks
+- name: Configure ESXi hostname and DNS servers
+ local_action:
+ module: vmware_dns_config
+ hostname: esxi_hostname
+ username: root
+ password: your_password
+ change_hostname_to: esx01
+ domainname: foo.org
+ dns_servers:
+ - 8.8.8.8
+ - 8.8.4.4
+'''
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+def configure_dns(host_system, hostname, domainname, dns_servers):
+
+ changed = False
+ host_config_manager = host_system.configManager
+ host_network_system = host_config_manager.networkSystem
+ config = host_network_system.dnsConfig
+
+ config.dhcp = False
+
+ if config.address != dns_servers:
+ config.address = dns_servers
+ changed = True
+ if config.domainName != domainname:
+ config.domainName = domainname
+ changed = True
+ if config.hostName != hostname:
+ config.hostName = hostname
+ changed = True
+ if changed:
+ host_network_system.UpdateDnsConfig(config)
+
+ return changed
+
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(change_hostname_to=dict(required=True, type='str'),
+ domainname=dict(required=True, type='str'),
+ dns_servers=dict(required=True, type='list')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ change_hostname_to = module.params['change_hostname_to']
+ domainname = module.params['domainname']
+ dns_servers = module.params['dns_servers']
+ try:
+ content = connect_to_api(module)
+ host = get_all_objs(content, [vim.HostSystem])
+ if not host:
+ module.fail_json(msg="Unable to locate Physical Host.")
+ host_system = host.keys()[0]
+ changed = configure_dns(host_system, change_hostname_to, domainname, dns_servers)
+ module.exit_json(changed=changed)
+ except vmodl.RuntimeFault as runtime_fault:
+ module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ module.fail_json(msg=method_fault.msg)
+ except Exception as e:
+ module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_dvs_host.py b/lib/ansible/modules/cloud/vmware/vmware_dvs_host.py
new file mode 100644
index 0000000000..031b90ec66
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_dvs_host.py
@@ -0,0 +1,253 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_dvs_host
+short_description: Add or remove a host from distributed virtual switch
+description:
+ - Add or remove a host from distributed virtual switch
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ esxi_hostname:
+ description:
+ - The ESXi hostname
+ required: True
+ switch_name:
+ description:
+ - The name of the Distributed vSwitch
+ required: True
+ vmnics:
+ description:
+ - The ESXi hosts vmnics to use with the Distributed vSwitch
+ required: True
+ state:
+ description:
+ - If the host should be present or absent attached to the vSwitch
+ choices: ['present', 'absent']
+ required: True
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example vmware_dvs_host command from Ansible Playbooks
+- name: Add Host to dVS
+ local_action:
+ module: vmware_dvs_host
+ hostname: vcenter_ip_or_hostname
+ username: vcenter_username
+ password: vcenter_password
+ esxi_hostname: esxi_hostname_as_listed_in_vcenter
+ switch_name: dvSwitch
+ vmnics:
+ - vmnic0
+ - vmnic1
+ state: present
+'''
+
+try:
+ import collections
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+class VMwareDvsHost(object):
+    """State machine that adds, updates or removes an ESXi host on a
+    distributed virtual switch (dVS)."""
+
+    def __init__(self, module):
+        self.module = module
+        self.dv_switch = None
+        self.uplink_portgroup = None
+        self.host = None
+        # NOTE(review): duplicate assignment — dv_switch was already set above.
+        self.dv_switch = None
+        self.nic = None
+        self.content = connect_to_api(self.module)
+        self.state = self.module.params['state']
+        self.switch_name = self.module.params['switch_name']
+        self.esxi_hostname = self.module.params['esxi_hostname']
+        self.vmnics = self.module.params['vmnics']
+
+    def process_state(self):
+        """Dispatch to the handler for (desired state, current state)."""
+        try:
+            dvs_host_states = {
+                'absent': {
+                    'present': self.state_destroy_dvs_host,
+                    'absent': self.state_exit_unchanged,
+                },
+                'present': {
+                    'update': self.state_update_dvs_host,
+                    'present': self.state_exit_unchanged,
+                    'absent': self.state_create_dvs_host,
+                }
+            }
+
+            dvs_host_states[self.state][self.check_dvs_host_state()]()
+        except vmodl.RuntimeFault as runtime_fault:
+            self.module.fail_json(msg=runtime_fault.msg)
+        except vmodl.MethodFault as method_fault:
+            self.module.fail_json(msg=method_fault.msg)
+        except Exception as e:
+            self.module.fail_json(msg=str(e))
+
+    def find_dvspg_by_name(self):
+        # NOTE(review): self.portgroup_name is never set in __init__, so this
+        # method would raise AttributeError if called; it appears to be unused.
+        portgroups = self.dv_switch.portgroup
+
+        for pg in portgroups:
+            if pg.name == self.portgroup_name:
+                return pg
+        return None
+
+    def find_dvs_uplink_pg(self):
+        """Return the switch's single uplink portgroup, or None if absent."""
+        # There should only always be a single uplink port group on
+        # a distributed virtual switch
+
+        if len(self.dv_switch.config.uplinkPortgroup):
+            return self.dv_switch.config.uplinkPortgroup[0]
+        else:
+            return None
+
+    # operation should be edit, add and remove
+    def modify_dvs_host(self, operation):
+        """Reconfigure dVS host membership; returns (changed, task result)."""
+        spec = vim.DistributedVirtualSwitch.ConfigSpec()
+        spec.configVersion = self.dv_switch.config.configVersion
+        spec.host = [vim.dvs.HostMember.ConfigSpec()]
+        spec.host[0].operation = operation
+        spec.host[0].host = self.host
+
+        if operation in ("edit", "add"):
+            # Attach each requested vmnic to the switch's uplink portgroup.
+            spec.host[0].backing = vim.dvs.HostMember.PnicBacking()
+            count = 0
+
+            for nic in self.vmnics:
+                spec.host[0].backing.pnicSpec.append(vim.dvs.HostMember.PnicSpec())
+                spec.host[0].backing.pnicSpec[count].pnicDevice = nic
+                spec.host[0].backing.pnicSpec[count].uplinkPortgroupKey = self.uplink_portgroup.key
+                count += 1
+
+        task = self.dv_switch.ReconfigureDvs_Task(spec)
+        changed, result = wait_for_task(task)
+        return changed, result
+
+    def state_destroy_dvs_host(self):
+        """Remove the host from the dVS (honours check mode)."""
+        operation = "remove"
+        changed = True
+        result = None
+
+        if not self.module.check_mode:
+            changed, result = self.modify_dvs_host(operation)
+        self.module.exit_json(changed=changed, result=str(result))
+
+    def state_exit_unchanged(self):
+        """Report that no change was needed."""
+        self.module.exit_json(changed=False)
+
+    def state_update_dvs_host(self):
+        """Re-edit the host's uplinks on the dVS (honours check mode)."""
+        operation = "edit"
+        changed = True
+        result = None
+
+        if not self.module.check_mode:
+            changed, result = self.modify_dvs_host(operation)
+        self.module.exit_json(changed=changed, result=str(result))
+
+    def state_create_dvs_host(self):
+        """Add the host to the dVS (honours check mode)."""
+        operation = "add"
+        changed = True
+        result = None
+
+        if not self.module.check_mode:
+            changed, result = self.modify_dvs_host(operation)
+        self.module.exit_json(changed=changed, result=str(result))
+
+    def find_host_attached_dvs(self):
+        """Return the HostSystem when it is already a member of the dVS."""
+        for dvs_host_member in self.dv_switch.config.host:
+            if dvs_host_member.config.host.name == self.esxi_hostname:
+                return dvs_host_member.config.host
+
+        return None
+
+    def check_uplinks(self):
+        """True when the host's current pnics equal the requested vmnics
+        (order-insensitive multiset comparison via Counter)."""
+        pnic_device = []
+
+        for dvs_host_member in self.dv_switch.config.host:
+            if dvs_host_member.config.host == self.host:
+                for pnicSpec in dvs_host_member.config.backing.pnicSpec:
+                    pnic_device.append(pnicSpec.pnicDevice)
+
+        return collections.Counter(pnic_device) == collections.Counter(self.vmnics)
+
+    def check_dvs_host_state(self):
+        """Classify current membership as 'absent', 'present' or 'update'."""
+        self.dv_switch = find_dvs_by_name(self.content, self.switch_name)
+
+        if self.dv_switch is None:
+            raise Exception("A distributed virtual switch %s does not exist" % self.switch_name)
+
+        self.uplink_portgroup = self.find_dvs_uplink_pg()
+
+        if self.uplink_portgroup is None:
+            raise Exception("An uplink portgroup does not exist on the distributed virtual switch %s"
+                            % self.switch_name)
+
+        self.host = self.find_host_attached_dvs()
+
+        if self.host is None:
+            # We still need the HostSystem object to add the host
+            # to the distributed vswitch
+            self.host = find_hostsystem_by_name(self.content, self.esxi_hostname)
+            if self.host is None:
+                self.module.fail_json(msg="The esxi_hostname %s does not exist in vCenter" % self.esxi_hostname)
+            return 'absent'
+        else:
+            if self.check_uplinks():
+                return 'present'
+            else:
+                return 'update'
+
+
+def main():
+
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(esxi_hostname=dict(required=True, type='str'),
+ switch_name=dict(required=True, type='str'),
+ vmnics=dict(required=True, type='list'),
+ state=dict(default='present', choices=['present', 'absent'], type='str')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ vmware_dvs_host = VMwareDvsHost(module)
+ vmware_dvs_host.process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_dvs_portgroup.py b/lib/ansible/modules/cloud/vmware/vmware_dvs_portgroup.py
new file mode 100644
index 0000000000..58b4cff67c
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_dvs_portgroup.py
@@ -0,0 +1,202 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_dvs_portgroup
+short_description: Create or remove a Distributed vSwitch portgroup
+description:
+ - Create or remove a Distributed vSwitch portgroup
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ portgroup_name:
+ description:
+ - The name of the portgroup that is to be created or deleted
+ required: True
+ switch_name:
+ description:
+ - The name of the distributed vSwitch the port group should be created on.
+ required: True
+ vlan_id:
+ description:
+ - The VLAN ID that should be configured with the portgroup
+ required: True
+ num_ports:
+ description:
+ - The number of ports the portgroup should contain
+ required: True
+ portgroup_type:
+ description:
+ - See VMware KB 1022312 regarding portgroup types
+ required: True
+ choices:
+ - 'earlyBinding'
+ - 'lateBinding'
+ - 'ephemeral'
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+ - name: Create Management portgroup
+ local_action:
+ module: vmware_dvs_portgroup
+ hostname: vcenter_ip_or_hostname
+ username: vcenter_username
+ password: vcenter_password
+ portgroup_name: Management
+ switch_name: dvSwitch
+ vlan_id: 123
+ num_ports: 120
+ portgroup_type: earlyBinding
+ state: present
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
+class VMwareDvsPortgroup(object):
+    """State machine that creates or deletes a portgroup on a distributed
+    virtual switch."""
+
+    def __init__(self, module):
+        self.module = module
+        self.dvs_portgroup = None
+        self.switch_name = self.module.params['switch_name']
+        self.portgroup_name = self.module.params['portgroup_name']
+        self.vlan_id = self.module.params['vlan_id']
+        self.num_ports = self.module.params['num_ports']
+        self.portgroup_type = self.module.params['portgroup_type']
+        self.dv_switch = None
+        self.state = self.module.params['state']
+        self.content = connect_to_api(module)
+
+    def process_state(self):
+        """Dispatch to the handler for (desired state, current state)."""
+        try:
+            dvspg_states = {
+                'absent': {
+                    'present': self.state_destroy_dvspg,
+                    'absent': self.state_exit_unchanged,
+                },
+                'present': {
+                    'update': self.state_update_dvspg,
+                    'present': self.state_exit_unchanged,
+                    'absent': self.state_create_dvspg,
+                }
+            }
+            dvspg_states[self.state][self.check_dvspg_state()]()
+        except vmodl.RuntimeFault as runtime_fault:
+            self.module.fail_json(msg=runtime_fault.msg)
+        except vmodl.MethodFault as method_fault:
+            self.module.fail_json(msg=method_fault.msg)
+        except Exception as e:
+            self.module.fail_json(msg=str(e))
+
+    def create_port_group(self):
+        """Build a portgroup ConfigSpec and add it to the switch.
+
+        Returns (changed, task result) from wait_for_task().
+        """
+        config = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+
+        config.name = self.portgroup_name
+        config.numPorts = self.num_ports
+
+        # vim.VMwareDVSPortSetting() does not exist in the pyvmomi documentation
+        # but this is the correct managed object type.
+
+        config.defaultPortConfig = vim.VMwareDVSPortSetting()
+
+        # vim.VmwareDistributedVirtualSwitchVlanIdSpec() does not exist in the
+        # pyvmomi documentation but this is the correct managed object type
+        config.defaultPortConfig.vlan = vim.VmwareDistributedVirtualSwitchVlanIdSpec()
+        config.defaultPortConfig.vlan.inherited = False
+        config.defaultPortConfig.vlan.vlanId = self.vlan_id
+        config.type = self.portgroup_type
+
+        spec = [config]
+        task = self.dv_switch.AddDVPortgroup_Task(spec)
+        changed, result = wait_for_task(task)
+        return changed, result
+
+    def state_destroy_dvspg(self):
+        """Delete the existing portgroup (honours check mode)."""
+        changed = True
+        result = None
+
+        if not self.module.check_mode:
+            task = self.dvs_portgroup.Destroy_Task()
+            changed, result = wait_for_task(task)
+        self.module.exit_json(changed=changed, result=str(result))
+
+    def state_exit_unchanged(self):
+        """Report that no change was needed."""
+        self.module.exit_json(changed=False)
+
+    def state_update_dvspg(self):
+        # In-place updates are not supported; reported as unchanged.
+        self.module.exit_json(changed=False, msg="Currently not implemented.")
+
+    def state_create_dvspg(self):
+        """Create the portgroup (honours check mode)."""
+        changed = True
+        result = None
+
+        if not self.module.check_mode:
+            changed, result = self.create_port_group()
+        self.module.exit_json(changed=changed, result=str(result))
+
+    def check_dvspg_state(self):
+        """Return 'present' when the named portgroup exists, else 'absent'."""
+        self.dv_switch = find_dvs_by_name(self.content, self.switch_name)
+
+        if self.dv_switch is None:
+            raise Exception("A distributed virtual switch with name %s does not exist" % self.switch_name)
+        self.dvs_portgroup = find_dvspg_by_name(self.dv_switch, self.portgroup_name)
+
+        if self.dvs_portgroup is None:
+            return 'absent'
+        else:
+            return 'present'
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(portgroup_name=dict(required=True, type='str'),
+ switch_name=dict(required=True, type='str'),
+ vlan_id=dict(required=True, type='int'),
+ num_ports=dict(required=True, type='int'),
+ portgroup_type=dict(required=True, choices=['earlyBinding', 'lateBinding', 'ephemeral'], type='str'),
+ state=dict(default='present', choices=['present', 'absent'], type='str')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ vmware_dvs_portgroup = VMwareDvsPortgroup(module)
+ vmware_dvs_portgroup.process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_dvswitch.py b/lib/ansible/modules/cloud/vmware/vmware_dvswitch.py
new file mode 100644
index 0000000000..b3108f6a9d
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_dvswitch.py
@@ -0,0 +1,213 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_dvswitch
+short_description: Create or remove a distributed vSwitch
+description:
+ - Create or remove a distributed vSwitch
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ datacenter_name:
+ description:
+ - The name of the datacenter that will contain the dvSwitch
+ required: True
+ switch_name:
+ description:
+ - The name of the switch to create or remove
+ required: True
+ mtu:
+ description:
+ - The switch maximum transmission unit
+ required: True
+ uplink_quantity:
+ description:
+ - Quantity of uplink per ESXi host added to the switch
+ required: True
+ discovery_proto:
+ description:
+ - Link discovery protocol between Cisco and Link Layer discovery
+ choices:
+ - 'cdp'
+ - 'lldp'
+ required: True
+ discovery_operation:
+ description:
+ - Select the discovery operation
+ choices:
+ - 'both'
+ - 'none'
+ - 'advertise'
+ - 'listen'
+ state:
+ description:
+ - Create or remove dvSwitch
+ default: 'present'
+ choices:
+ - 'present'
+ - 'absent'
+ required: False
+extends_documentation_fragment: vmware.documentation
+'''
+EXAMPLES = '''
+- name: Create dvswitch
+ local_action:
+ module: vmware_dvswitch
+ hostname: vcenter_ip_or_hostname
+ username: vcenter_username
+ password: vcenter_password
+ datacenter_name: datacenter
+ switch_name: dvSwitch
+ mtu: 9000
+ uplink_quantity: 2
+ discovery_proto: lldp
+ discovery_operation: both
+ state: present
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+class VMwareDVSwitch(object):
+ def __init__(self, module):
+ self.module = module
+ self.dvs = None
+ self.switch_name = self.module.params['switch_name']
+ self.datacenter_name = self.module.params['datacenter_name']
+ self.mtu = self.module.params['mtu']
+ self.uplink_quantity = self.module.params['uplink_quantity']
+ self.discovery_proto = self.module.params['discovery_proto']
+ self.discovery_operation = self.module.params['discovery_operation']
+ self.switch_name = self.module.params['switch_name']
+ self.state = self.module.params['state']
+ self.content = connect_to_api(module)
+
+ def process_state(self):
+ try:
+ dvs_states = {
+ 'absent': {
+ 'present': self.state_destroy_dvs,
+ 'absent': self.state_exit_unchanged,
+ },
+ 'present': {
+ 'update': self.state_update_dvs,
+ 'present': self.state_exit_unchanged,
+ 'absent': self.state_create_dvs,
+ }
+ }
+ dvs_states[self.state][self.check_dvs_configuration()]()
+ except vmodl.RuntimeFault as runtime_fault:
+ self.module.fail_json(msg=runtime_fault.msg)
+ except vmodl.MethodFault as method_fault:
+ self.module.fail_json(msg=method_fault.msg)
+ except Exception as e:
+ self.module.fail_json(msg=str(e))
+
+
+ def create_dvswitch(self, network_folder):
+ result = None
+ changed = False
+
+ spec = vim.DistributedVirtualSwitch.CreateSpec()
+ spec.configSpec = vim.dvs.VmwareDistributedVirtualSwitch.ConfigSpec()
+ spec.configSpec.uplinkPortPolicy = vim.DistributedVirtualSwitch.NameArrayUplinkPortPolicy()
+ spec.configSpec.linkDiscoveryProtocolConfig = vim.host.LinkDiscoveryProtocolConfig()
+
+ spec.configSpec.name = self.switch_name
+ spec.configSpec.maxMtu = self.mtu
+ spec.configSpec.linkDiscoveryProtocolConfig.protocol = self.discovery_proto
+ spec.configSpec.linkDiscoveryProtocolConfig.operation = self.discovery_operation
+ spec.productInfo = vim.dvs.ProductSpec()
+ spec.productInfo.name = "DVS"
+ spec.productInfo.vendor = "VMware"
+
+ for count in range(1, self.uplink_quantity+1):
+ spec.configSpec.uplinkPortPolicy.uplinkPortName.append("uplink%d" % count)
+
+ task = network_folder.CreateDVS_Task(spec)
+ changed, result = wait_for_task(task)
+ return changed, result
+
+ def state_exit_unchanged(self):
+ self.module.exit_json(changed=False)
+
+ def state_destroy_dvs(self):
+ task = self.dvs.Destroy_Task()
+ changed, result = wait_for_task(task)
+ self.module.exit_json(changed=changed, result=str(result))
+
+ def state_update_dvs(self):
+ self.module.exit_json(changed=False, msg="Currently not implemented.")
+
+ def state_create_dvs(self):
+ changed = True
+ result = None
+
+ if not self.module.check_mode:
+ dc = find_datacenter_by_name(self.content, self.datacenter_name)
+ changed, result = self.create_dvswitch(dc.networkFolder)
+
+ self.module.exit_json(changed=changed, result=str(result))
+
+ def check_dvs_configuration(self):
+ self.dvs = find_dvs_by_name(self.content, self.switch_name)
+ if self.dvs is None:
+ return 'absent'
+ else:
+ return 'present'
+
+
+def main():
+ argument_spec = vmware_argument_spec()
+ argument_spec.update(dict(datacenter_name=dict(required=True, type='str'),
+ switch_name=dict(required=True, type='str'),
+ mtu=dict(required=True, type='int'),
+ uplink_quantity=dict(required=True, type='int'),
+ discovery_proto=dict(required=True, choices=['cdp', 'lldp'], type='str'),
+ discovery_operation=dict(required=True, choices=['both', 'none', 'advertise', 'listen'], type='str'),
+ state=dict(default='present', choices=['present', 'absent'], type='str')))
+
+ module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
+
+ if not HAS_PYVMOMI:
+ module.fail_json(msg='pyvmomi is required for this module')
+
+ vmware_dvswitch = VMwareDVSwitch(module)
+ vmware_dvswitch.process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_guest.py b/lib/ansible/modules/cloud/vmware/vmware_guest.py
new file mode 100644
index 0000000000..b4843a14bd
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_guest.py
@@ -0,0 +1,1349 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_guest
short_description: Manages virtual machines in vCenter
+description:
+ - Uses pyvmomi to ...
+ - copy a template to a new virtualmachine
+ - poweron/poweroff/restart a virtualmachine
+ - remove a virtualmachine
+version_added: 2.2
+author: James Tanner (@jctanner) <tanner.jc@gmail.com>
+notes:
+ - Tested on vSphere 6.0
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ state:
+ description:
+ - What state should the virtualmachine be in?
+ required: True
+ choices: ['present', 'absent', 'poweredon', 'poweredoff', 'restarted', 'suspended']
+ name:
+ description:
+ - Name of the newly deployed guest
+ required: True
+ name_match:
+ description:
+ - If multiple vms matching the name, use the first or last found
+ required: False
+ default: 'first'
+ choices: ['first', 'last']
+ uuid:
+ description:
+ - UUID of the instance to manage if known, this is vmware's unique identifier.
+ - This is required if name is not supplied.
+ required: False
+ template:
+ description:
+ - Name of the template to deploy, if needed to create the guest (state=present).
+ - If the guest exists already this setting will be ignored.
+ required: False
+ folder:
+ description:
+ - Destination folder path for the new guest
+ required: False
+ hardware:
+ description:
+ - Attributes such as cpus, memory, osid, and disk controller
+ required: False
+ disk:
+ description:
+ - A list of disks to add
+ required: False
+ nic:
+ description:
+ - A list of nics to add
+ required: False
+ wait_for_ip_address:
+ description:
+ - Wait until vcenter detects an IP address for the guest
+ required: False
+ force:
+ description:
+ - Ignore warnings and complete the actions
+ required: False
+ datacenter:
+ description:
+ - Destination datacenter for the deploy operation
+ required: True
+ esxi_hostname:
+ description:
+ - The esxi hostname where the VM will run.
+ required: False
+ annotation:
+ description:
+ - A note or annotation to include in the VM
+ required: False
+ version_added: "2.3"
+ customize:
+ description:
+ - Should customization spec be run
+ required: False
+ version_added: "2.3"
+ ips:
+ description:
+ - IP Addresses to set
+ required: False
+ version_added: "2.3"
+ networks:
+ description:
+ - Network to use should include VM network name and gateway
+ required: False
+ version_added: "2.3"
+ dns_servers:
+ description:
+ - DNS servers to use
+ required: False
+ version_added: "2.3"
+ domain:
+ description:
+ - Domain to use while customizing
+ required: False
+ version_added: "2.3"
+ snapshot_op:
+ description:
+ - A key, value pair of snapshot operation types and their additional required parameters.
+ required: False
+ version_added: "2.3"
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+Example from Ansible playbook
+#
+# Create a VM from a template
+#
+ - name: create the VM
+ vmware_guest:
+ validate_certs: False
+ hostname: 192.0.2.44
+ username: administrator@vsphere.local
+ password: vmware
+ name: testvm_2
+ state: poweredon
+ folder: testvms
+ disk:
+ - size_gb: 10
+ type: thin
+ datastore: g73_datastore
+ nic:
+ - type: vmxnet3
+ network: VM Network
+ network_type: standard
+ hardware:
+ memory_mb: 512
+ num_cpus: 1
+ osid: centos64guest
+ scsi: paravirtual
+ datacenter: datacenter1
+ esxi_hostname: 192.0.2.117
+ template: template_el7
+ wait_for_ip_address: yes
+ register: deploy
+
+#
+# Clone Template and customize
+#
+ - name: Clone template and customize
+ vmware_guest:
+ hostname: "192.168.1.209"
+ username: "administrator@vsphere.local"
+ password: "vmware"
+ validate_certs: False
+ name: testvm-2
+ datacenter: datacenter1
    cluster: cluster
    template: template_el7
+ customize: True
+ domain: "example.com"
+ dns_servers: ['192.168.1.1','192.168.1.2']
+ ips: "192.168.1.100"
+ networks:
+ '192.168.1.0/24':
+ network: 'VM Network'
+ gateway: '192.168.1.1'
+#
+# Gather facts only
+#
+ - name: gather the VM facts
+ vmware_guest:
+ validate_certs: False
+ hostname: 192.168.1.209
+ username: administrator@vsphere.local
+ password: vmware
+ name: testvm_2
+ esxi_hostname: 192.168.1.117
+ register: facts
+
+### Snapshot Operations
+# Create snapshot
+ - vmware_guest:
+ hostname: 192.168.1.209
+ username: administrator@vsphere.local
+ password: vmware
+ validate_certs: False
+ name: dummy_vm
+ snapshot_op:
+ op_type: create
+ name: snap1
+ description: snap1_description
+
+# Remove a snapshot
+ - vmware_guest:
+ hostname: 192.168.1.209
+ username: administrator@vsphere.local
+ password: vmware
+ validate_certs: False
+ name: dummy_vm
+ snapshot_op:
+ op_type: remove
+ name: snap1
+
+# Revert to a snapshot
+ - vmware_guest:
+ hostname: 192.168.1.209
+ username: administrator@vsphere.local
+ password: vmware
+ validate_certs: False
+ name: dummy_vm
+ snapshot_op:
+ op_type: revert
+ name: snap1
+
+# List all snapshots of a VM
+ - vmware_guest:
+ hostname: 192.168.1.209
+ username: administrator@vsphere.local
+ password: vmware
+ validate_certs: False
+ name: dummy_vm
+ snapshot_op:
+ op_type: list_all
+
+# List current snapshot of a VM
+ - vmware_guest:
+ hostname: 192.168.1.209
+ username: administrator@vsphere.local
+ password: vmware
+ validate_certs: False
+ name: dummy_vm
+ snapshot_op:
+ op_type: list_current
+
+# Remove all snapshots of a VM
+ - vmware_guest:
+ hostname: 192.168.1.209
+ username: administrator@vsphere.local
+ password: vmware
+ validate_certs: False
+ name: dummy_vm
+ snapshot_op:
+ op_type: remove_all
+'''
+
+RETURN = """
+instance:
    description: metadata about the new virtual machine
+ returned: always
+ type: dict
+ sample: None
+"""
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+HAS_PYVMOMI = False
+try:
+ import pyVmomi
+ from pyVmomi import vim
+ HAS_PYVMOMI = True
+except ImportError:
+ pass
+
+import os
+import time
+from netaddr import IPNetwork, IPAddress
+
+from ansible.module_utils.urls import fetch_url
+
+class PyVmomiHelper(object):
+
    def __init__(self, module):
        """Connect to vCenter and prepare lazily-built folder caches.

        Fails the module immediately when pyvmomi is not importable.
        """
        if not HAS_PYVMOMI:
            module.fail_json(msg='pyvmomi module required')

        self.module = module
        self.params = module.params
        self.si = None
        # Establishes self.content (service instance content) via connect_to_api.
        self.smartconnect()
        # Folder caches; populated on demand by getfolders().
        self.datacenter = None
        self.folders = None
        self.foldermap = None
+
    def smartconnect(self):
        # Connect to the vCenter API and cache the ServiceInstance content
        # object used by all subsequent lookups.
        self.content = connect_to_api(self.module)
+
+ def _build_folder_tree(self, folder, tree={}, treepath=None):
+
+ tree = {'virtualmachines': [],
+ 'subfolders': {},
+ 'vimobj': folder,
+ 'name': folder.name}
+
+ children = None
+ if hasattr(folder, 'childEntity'):
+ children = folder.childEntity
+
+ if children:
+ for child in children:
+ if child == folder or child in tree:
+ continue
+ if isinstance(child, vim.Folder):
+ ctree = self._build_folder_tree(child)
+ tree['subfolders'][child] = dict.copy(ctree)
+ elif isinstance(child, vim.VirtualMachine):
+ tree['virtualmachines'].append(child)
+ else:
+ if isinstance(folder, vim.VirtualMachine):
+ return folder
+ return tree
+
+
+ def _build_folder_map(self, folder, vmap={}, inpath='/'):
+
+ ''' Build a searchable index for vms+uuids+folders '''
+
+ if isinstance(folder, tuple):
+ folder = folder[1]
+
+ if not 'names' in vmap:
+ vmap['names'] = {}
+ if not 'uuids' in vmap:
+ vmap['uuids'] = {}
+ if not 'paths' in vmap:
+ vmap['paths'] = {}
+
+ if inpath == '/':
+ thispath = '/vm'
+ else:
+ thispath = os.path.join(inpath, folder['name'])
+
+ if thispath not in vmap['paths']:
+ vmap['paths'][thispath] = []
+
+ # helpful for isolating folder objects later on
+ if not 'path_by_fvim' in vmap:
+ vmap['path_by_fvim'] = {}
+ if not 'fvim_by_path' in vmap:
+ vmap['fvim_by_path'] = {}
+ # store object by path and store path by object
+ vmap['fvim_by_path'][thispath] = folder['vimobj']
+ vmap['path_by_fvim'][folder['vimobj']] = thispath
+
+ # helpful for isolating vm objects later on
+ if not 'path_by_vvim' in vmap:
+ vmap['path_by_vvim'] = {}
+ if not 'vvim_by_path' in vmap:
+ vmap['vvim_by_path'] = {}
+ if thispath not in vmap['vvim_by_path']:
+ vmap['vvim_by_path'][thispath] = []
+
+
+ for item in folder.items():
+ k = item[0]
+ v = item[1]
+
+ if k == 'name':
+ pass
+ elif k == 'subfolders':
+ for x in v.items():
+ vmap = self._build_folder_map(x, vmap=vmap, inpath=thispath)
+ elif k == 'virtualmachines':
+ for x in v:
+ if not x.config.name in vmap['names']:
+ vmap['names'][x.config.name] = []
+ vmap['names'][x.config.name].append(x.config.uuid)
+ vmap['uuids'][x.config.uuid] = x.config.name
+ vmap['paths'][thispath].append(x.config.uuid)
+
+ if x not in vmap['vvim_by_path'][thispath]:
+ vmap['vvim_by_path'][thispath].append(x)
+ if x not in vmap['path_by_vvim']:
+ vmap['path_by_vvim'][x] = thispath
+ return vmap
+
    def getfolders(self):
        """Build (and cache) the datacenter's folder tree and search map.

        Returns a (folder_tree, folder_map) tuple.
        NOTE(review): this sets self.folder_map while __init__ initializes
        self.foldermap; callers that assign the returned tuple populate
        both names — confirm before relying on either attribute directly.
        """
        if not self.datacenter:
            self.get_datacenter()
        self.folders = self._build_folder_tree(self.datacenter.vmFolder)
        self.folder_map = self._build_folder_map(self.folders)
        return (self.folders, self.folder_map)
+
    def compile_folder_path_for_object(self, vobj):
        ''' make a /vm/foo/bar/baz like folder path for an object '''
        paths = []
        if isinstance(vobj, vim.Folder):
            paths.append(vobj.name)

        # Walk up the parent chain, collecting only folder names.
        thisobj = vobj
        while hasattr(thisobj, 'parent'):
            thisobj = thisobj.parent
            if isinstance(thisobj, vim.Folder):
                paths.append(thisobj.name)
        paths.reverse()
        # The hidden inventory root folder is named 'Datacenters'; drop it
        # so the path starts at the datacenter level.
        # NOTE(review): if vobj has no Folder ancestors, paths is empty and
        # paths[0] raises IndexError — confirm callers only pass inventory
        # objects that live under a folder.
        if paths[0] == 'Datacenters':
            paths.remove('Datacenters')
        return '/' + '/'.join(paths)
+
    def get_datacenter(self):
        # Cache the vim.Datacenter matching the 'datacenter' parameter
        # (None when no match is found).
        self.datacenter = get_obj(self.content, [vim.Datacenter],
                                  self.params['datacenter'])
+
+ def getvm(self, name=None, uuid=None, folder=None, name_match=None):
+
+ # https://www.vmware.com/support/developer/vc-sdk/visdk2xpubs/ReferenceGuide/vim.SearchIndex.html
+ # self.si.content.searchIndex.FindByInventoryPath('DC1/vm/test_folder')
+
+ vm = None
+ folder_path = None
+ searchpath = None
+
+ if uuid:
+ vm = self.content.searchIndex.FindByUuid(uuid=uuid, vmSearch=True)
+
+ elif folder:
+
+ if self.params['folder'].endswith('/'):
+ self.params['folder'] = self.params['folder'][0:-1]
+
+ # Build the absolute folder path to pass into the search method
+ if self.params['folder'].startswith('/vm'):
+ searchpath = '%s' % self.params['datacenter']
+ searchpath += self.params['folder']
+ elif self.params['folder'].startswith('/'):
+ searchpath = '%s' % self.params['datacenter']
+ searchpath += '/vm' + self.params['folder']
+ else:
+ # need to look for matching absolute path
+ if not self.folders:
+ self.getfolders()
+ paths = self.folder_map['paths'].keys()
+ paths = [x for x in paths if x.endswith(self.params['folder'])]
+ if len(paths) > 1:
+ self.module.fail_json(msg='%s matches more than one folder. Please use the absolute path starting with /vm/' % self.params['folder'])
+ elif paths:
+ searchpath = paths[0]
+
+ if searchpath:
+ # get all objects for this path ...
+ fObj = self.content.searchIndex.FindByInventoryPath(searchpath)
+ if fObj:
+ if isinstance(fObj, vim.Datacenter):
+ fObj = fObj.vmFolder
+ for cObj in fObj.childEntity:
+ if not isinstance(cObj, vim.VirtualMachine):
+ continue
+ if cObj.name == name:
+ vm = cObj
+ break
+
+ if not vm:
+
+ # FIXME - this is unused if folder has a default value
+ vmList = get_all_objs(self.content, [vim.VirtualMachine])
+
+ # narrow down by folder
+ if folder:
+ if not self.folders:
+ self.getfolders()
+
+ # compare the folder path of each VM against the search path
+ for item in vmList.items():
+ vobj = item[0]
+ if not isinstance(vobj.parent, vim.Folder):
+ continue
+ if self.compile_folder_path_for_object(vobj) == searchpath:
+ return vobj
+
+ if name_match:
+ if name_match == 'first':
+ vm = get_obj(self.content, [vim.VirtualMachine], name)
+ elif name_match == 'last':
+ matches = []
+ vmList = get_all_objs(self.content, [vim.VirtualMachine])
+ for thisvm in vmList:
+ if thisvm.config.name == name:
+ matches.append(thisvm)
+ if matches:
+ vm = matches[-1]
+ else:
+ matches = []
+ vmList = get_all_objs(self.content, [vim.VirtualMachine])
+ for thisvm in vmList:
+ if thisvm.config.name == name:
+ matches.append(thisvm)
+ if len(matches) > 1:
+ module.fail_json(msg='more than 1 vm exists by the name %s. Please specify a uuid, or a folder, or a datacenter or name_match' % name)
+ if matches:
+ vm = matches[0]
+
+ return vm
+
+
    def set_powerstate(self, vm, state, force):
        """
        Set the power status for a VM determined by the current and
        requested states. force is forceful
        """
        facts = self.gather_facts(vm)
        expected_state = state.replace('_', '').lower()
        current_state = facts['hw_power_status'].lower()
        result = {}

        # Need Force
        # NOTE(review): this branch returns a bare string while every other
        # path returns a result dict — callers must handle both shapes.
        if not force and current_state not in ['poweredon', 'poweredoff']:
            return "VM is in %s power state. Force is required!" % current_state

        # State is already true
        if current_state == expected_state:
            result['changed'] = False
            result['failed'] = False
        else:
            task = None
            try:
                if expected_state == 'poweredoff':
                    task = vm.PowerOff()

                elif expected_state == 'poweredon':
                    task = vm.PowerOn()

                elif expected_state == 'restarted':
                    # Reset is only valid from a running-ish state.
                    if current_state in ('poweredon', 'poweringon', 'resetting'):
                        task = vm.Reset()
                    else:
                        result = {'changed': False, 'failed': True,
                                  'msg': "Cannot restart VM in the current state %s" % current_state}

            except Exception:
                # get_exception() is the py2-era ansible helper for the
                # active exception; the failure is reported, not raised.
                result = {'changed': False, 'failed': True,
                          'msg': get_exception()}

            if task:
                # Block until vCenter finishes the power task.
                self.wait_for_task(task)
                if task.info.state == 'error':
                    result = {'changed': False, 'failed': True, 'msg': task.info.error.msg}
                else:
                    result = {'changed': True, 'failed': False}

        # need to get new metadata if changed
        if result['changed']:
            newvm = self.getvm(uuid=vm.config.uuid)
            facts = self.gather_facts(newvm)
            result['instance'] = facts
        return result
+
+
    def gather_facts(self, vm):

        ''' Gather facts from vim.VirtualMachine object. '''

        facts = {
            'module_hw': True,
            'hw_name': vm.config.name,
            'hw_power_status': vm.summary.runtime.powerState,
            'hw_guest_full_name': vm.summary.guest.guestFullName,
            'hw_guest_id': vm.summary.guest.guestId,
            'hw_product_uuid': vm.config.uuid,
            'hw_processor_count': vm.config.hardware.numCPU,
            'hw_memtotal_mb': vm.config.hardware.memoryMB,
            'hw_interfaces': [],
            'ipv4': None,
            'ipv6': None,
        }

        # Map each guest NIC's MAC address to the list of IPs that VMware
        # tools reports for it.
        netDict = {}
        for device in vm.guest.net:
            mac = device.macAddress
            ips = list(device.ipAddress)
            netDict[mac] = ips
        # The last address seen of each family wins as the primary ipv4/ipv6.
        # NOTE: iteritems() is Python 2 only, consistent with this module's era.
        for k, v in netDict.iteritems():
            for ipaddress in v:
                if ipaddress:
                    if '::' in ipaddress:
                        facts['ipv6'] = ipaddress
                    else:
                        facts['ipv4'] = ipaddress

        # Emit one hw_ethN fact per NIC-like virtual device (anything that
        # carries a macAddress attribute).
        for idx, entry in enumerate(vm.config.hardware.device):
            if not hasattr(entry, 'macAddress'):
                continue

            factname = 'hw_eth' + str(idx)
            facts[factname] = {
                'addresstype': entry.addressType,
                'label': entry.deviceInfo.label,
                'macaddress': entry.macAddress,
                'ipaddresses': netDict.get(entry.macAddress, None),
                'macaddress_dash': entry.macAddress.replace(':', '-'),
                'summary': entry.deviceInfo.summary,
            }
            facts['hw_interfaces'].append('eth' + str(idx))

        return facts
+
+
+ def remove_vm(self, vm):
+ # https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.ManagedEntity.html#destroy
+ task = vm.Destroy()
+ self.wait_for_task(task)
+
+ if task.info.state == 'error':
+ return ({'changed': False, 'failed': True, 'msg': task.info.error.msg})
+ else:
+ return ({'changed': True, 'failed': False})
+
+
+ def deploy_template(self, poweron=False, wait_for_ip=False):
+
+ # https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py
+ # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.CloneSpec.html
+ # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.ConfigSpec.html
+ # https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
+
+ # FIXME:
+ # - clusters
+ # - multiple datacenters
+ # - resource pools
+ # - multiple templates by the same name
+ # - multiple disks
+ # - changing the esx host is ignored?
+ # - static IPs
+
+ # FIXME: need to search for this in the same way as guests to ensure accuracy
+ template = get_obj(self.content, [vim.VirtualMachine], self.params['template'])
+ if not template:
+ self.module.fail_json(msg="Could not find a template named %s" % self.params['template'])
+
+ datacenters = get_all_objs(self.content, [vim.Datacenter])
+ datacenter = get_obj(self.content, [vim.Datacenter],
+ self.params['datacenter'])
+ if not datacenter:
+ self.module.fail_json(msg='No datacenter named %s was found' % self.params['datacenter'])
+
+ if not self.foldermap:
+ self.folders, self.foldermap = self.getfolders()
+
+ # find matching folders
+ if self.params['folder'].startswith('/'):
+ folders = [x for x in self.foldermap['fvim_by_path'].items() if x[0] == self.params['folder']]
+ else:
+ folders = [x for x in self.foldermap['fvim_by_path'].items() if x[0].endswith(self.params['folder'])]
+
+ # throw error if more than one match or no matches
+ if len(folders) == 0:
+ self.module.fail_json(msg='no folder matched the path: %s' % self.params['folder'])
+ elif len(folders) > 1:
+ self.module.fail_json(msg='too many folders matched "%s", please give the full path starting with /vm/' % self.params['folder'])
+
+ # grab the folder vim object
+ destfolder = folders[0][1]
+
+ # if the user wants a cluster, get the list of hosts for the cluster and use the first one
+ if self.params['cluster']:
+ cluster = get_obj(self.content, [vim.ClusterComputeResource], self.params['cluster'])
+ if not cluster:
+ self.module.fail_json(msg="Failed to find a cluster named %s" % self.params['cluster'])
+ #resource_pool = cluster.resourcePool
+ hostsystems = [x for x in cluster.host]
+ hostsystem = hostsystems[0]
+ else:
+ hostsystem = get_obj(self.content, [vim.HostSystem], self.params['esxi_hostname'])
+ if not hostsystem:
+ self.module.fail_json(msg="Failed to find a host named %s" % self.params['esxi_hostname'])
+
+ # set the destination datastore in the relocation spec
+ datastore_name = None
+ datastore = None
+ if self.params['disk']:
+ if 'datastore' in self.params['disk'][0]:
+ datastore_name = self.params['disk'][0]['datastore']
+ datastore = get_obj(self.content, [vim.Datastore], datastore_name)
+ if not datastore:
+ # use the template's existing DS
+ disks = [x for x in template.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)]
+ datastore = disks[0].backing.datastore
+ datastore_name = datastore.name
+ if not datastore:
+ self.module.fail_json(msg="Failed to find a matching datastore")
+
+ # create the relocation spec
+ relospec = vim.vm.RelocateSpec()
+ relospec.host = hostsystem
+ relospec.datastore = datastore
+
+ # Find the associated resourcepool for the host system
+ # * FIXME: find resourcepool for clusters too
+ resource_pool = None
+ resource_pools = get_all_objs(self.content, [vim.ResourcePool])
+ for rp in resource_pools.items():
+ if not rp[0]:
+ continue
+ if not hasattr(rp[0], 'parent'):
+ continue
+ if rp[0].parent == hostsystem.parent:
+ resource_pool = rp[0]
+ break
+ if resource_pool:
+ relospec.pool = resource_pool
+ else:
+ self.module.fail_json(msg="Failed to find a resource group for %s" \
+ % hostsystem.name)
+
+ clonespec_kwargs = {}
+ clonespec_kwargs['location'] = relospec
+
+ # create disk spec if not default
+ if self.params['disk']:
+ # grab the template's first disk and modify it for this customization
+ disks = [x for x in template.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)]
+ diskspec = vim.vm.device.VirtualDeviceSpec()
+ # set the operation to edit so that it knows to keep other settings
+ diskspec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+ diskspec.device = disks[0]
+
+ # get the first disk attributes
+ pspec = self.params.get('disk')[0]
+
+ # is it thin?
+ if pspec.get('type', '').lower() == 'thin':
+ diskspec.device.backing.thinProvisioned = True
+
+ # which datastore?
+ if pspec.get('datastore'):
+ # This is already handled by the relocation spec,
+ # but it needs to eventually be handled for all the
+ # other disks defined
+ pass
+
+ # what size is it?
+ if [x for x in pspec.keys() if x.startswith('size_') or x == 'size']:
+ # size_tb, size_gb, size_mb, size_kb, size_b ...?
+ if 'size' in pspec:
+ expected = ''.join(c for c in pspec['size'] if c.isdigit())
+ unit = pspec['size'].replace(expected, '').lower()
+ expected = int(expected)
+ else:
+ param = [x for x in pspec.keys() if x.startswith('size_')][0]
+ unit = param.split('_')[-1].lower()
+ expected = [x[1] for x in pspec.items() if x[0].startswith('size_')][0]
+ expected = int(expected)
+
+ kb = None
+ if unit == 'tb':
+ kb = expected * 1024 * 1024 * 1024
+ elif unit == 'gb':
+ kb = expected * 1024 * 1024
+ elif unit ==' mb':
+ kb = expected * 1024
+ elif unit == 'kb':
+ kb = expected
+ else:
+ self.module.fail_json(msg='%s is not a supported unit for disk size' % unit)
+ diskspec.device.capacityInKB = kb
+
+ # tell the configspec that the disk device needs to change
+ configspec = vim.vm.ConfigSpec(deviceChange=[diskspec])
+ clonespec_kwargs['config'] = configspec
+
+ # set cpu/memory/etc
+ if 'hardware' in self.params:
+ if not 'config' in clonespec_kwargs:
+ clonespec_kwargs['config'] = vim.vm.ConfigSpec()
+ if 'num_cpus' in self.params['hardware']:
+ clonespec_kwargs['config'].numCPUs = \
+ int(self.params['hardware']['num_cpus'])
+ if 'memory_mb' in self.params['hardware']:
+ clonespec_kwargs['config'].memoryMB = \
+ int(self.params['hardware']['memory_mb'])
+
+ # lets try and assign a static ip addresss
+ if self.params['customize'] is True:
+ ip_settings = list()
+ if self.params['ips']:
+ for ip_string in self.params['ips']:
+ ip = IPAddress(self.params['ips'])
+ for network in self.params['networks']:
+ if network:
+ if ip in IPNetwork(network):
+ self.params['networks'][network]['ip'] = str(ip)
+ ipnet = IPNetwork(network)
+ self.params['networks'][network]['subnet_mask'] = str(
+ ipnet.netmask
+ )
+ ip_settings.append(self.params['networks'][network])
+
+ key = 0
+ network = get_obj(self.content, [vim.Network], ip_settings[key]['network'])
+ datacenter = get_obj(self.content, [vim.Datacenter], self.params['datacenter'])
+ # get the folder where VMs are kept for this datacenter
+ destfolder = datacenter.vmFolder
+
+ cluster = get_obj(self.content, [vim.ClusterComputeResource],self.params['cluster'])
+
+ devices = []
+ adaptermaps = []
+
+ try:
+ for device in template.config.hardware.device:
+ if hasattr(device, 'addressType'):
+ nic = vim.vm.device.VirtualDeviceSpec()
+ nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
+ nic.device = device
+ devices.append(nic)
+ except:
+ pass
+
+ # single device support
+ nic = vim.vm.device.VirtualDeviceSpec()
+ nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+ nic.device = vim.vm.device.VirtualVmxnet3()
+ nic.device.wakeOnLanEnabled = True
+ nic.device.addressType = 'assigned'
+ nic.device.deviceInfo = vim.Description()
+ nic.device.deviceInfo.label = 'Network Adapter %s' % (key + 1)
+ nic.device.deviceInfo.summary = ip_settings[key]['network']
+
+ if hasattr(get_obj(self.content, [vim.Network], ip_settings[key]['network']), 'portKeys'):
+ # VDS switch
+ pg_obj = get_obj(self.content, [vim.dvs.DistributedVirtualPortgroup], ip_settings[key]['network'])
+ dvs_port_connection = vim.dvs.PortConnection()
+ dvs_port_connection.portgroupKey= pg_obj.key
+ dvs_port_connection.switchUuid= pg_obj.config.distributedVirtualSwitch.uuid
+ nic.device.backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
+ nic.device.backing.port = dvs_port_connection
+
+ else:
+ # vSwitch
+ nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
+ nic.device.backing.network = get_obj(self.content, [vim.Network], ip_settings[key]['network'])
+ nic.device.backing.deviceName = ip_settings[key]['network']
+
+ nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+ nic.device.connectable.startConnected = True
+ nic.device.connectable.allowGuestControl = True
+ nic.device.connectable.connected = True
+ nic.device.connectable.allowGuestControl = True
+ devices.append(nic)
+
+ # Update the spec with the added NIC
+ clonespec_kwargs['config'].deviceChange = devices
+
+ guest_map = vim.vm.customization.AdapterMapping()
+ guest_map.adapter = vim.vm.customization.IPSettings()
+ guest_map.adapter.ip = vim.vm.customization.FixedIp()
+ guest_map.adapter.ip.ipAddress = str(ip_settings[key]['ip'])
+ guest_map.adapter.subnetMask = str(ip_settings[key]['subnet_mask'])
+
+ try:
+ guest_map.adapter.gateway = ip_settings[key]['gateway']
+ except:
+ pass
+
+ try:
+ guest_map.adapter.dnsDomain = self.params['domain']
+ except:
+ pass
+
+ adaptermaps.append(guest_map)
+
+ # DNS settings
+ globalip = vim.vm.customization.GlobalIPSettings()
+ globalip.dnsServerList = self.params['dns_servers']
+ globalip.dnsSuffixList = str(self.params['domain'])
+
+ # Hostname settings
+ ident = vim.vm.customization.LinuxPrep()
+ ident.domain = str(self.params['domain'])
+ ident.hostName = vim.vm.customization.FixedName()
+ ident.hostName.name = self.params['name']
+
+ customspec = vim.vm.customization.Specification()
+ clonespec_kwargs['customization'] = customspec
+
+ clonespec_kwargs['customization'].nicSettingMap = adaptermaps
+ clonespec_kwargs['customization'].globalIPSettings = globalip
+ clonespec_kwargs['customization'].identity = ident
+
+ clonespec = vim.vm.CloneSpec(**clonespec_kwargs)
+ task = template.Clone(folder=destfolder, name=self.params['name'], spec=clonespec)
+ self.wait_for_task(task)
+
+ if task.info.state == 'error':
+ # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
+ # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173
+ return ({'changed': False, 'failed': True, 'msg': task.info.error.msg})
+ else:
+
+ # set annotation
+ vm = task.info.result
+ if self.params['annotation']:
+ annotation_spec = vim.vm.ConfigSpec()
+ annotation_spec.annotation = str(self.params['annotation'])
+ task = vm.ReconfigVM_Task(annotation_spec)
+ self.wait_for_task(task)
+ if wait_for_ip:
+ self.set_powerstate(vm, 'poweredon', force=False)
+ self.wait_for_vm_ip(vm)
+ vm_facts = self.gather_facts(vm)
+ return ({'changed': True, 'failed': False, 'instance': vm_facts})
+
    def wait_for_task(self, task):
        """Poll a vCenter task once a second until it succeeds or errors."""
        # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Task.html
        # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.TaskInfo.html
        # https://github.com/virtdevninja/pyvmomi-community-samples/blob/master/samples/tools/tasks.py
        while task.info.state not in ['success', 'error']:
            time.sleep(1)
+
    def wait_for_vm_ip(self, vm, poll=100, sleep=5):
        """Poll (up to poll * sleep seconds) until the guest reports an IP.

        Returns the most recent facts dict; gives up silently after the
        poll budget is exhausted (facts will have ipv4/ipv6 == None).
        """
        ips = None
        facts = {}
        thispoll = 0
        while not ips and thispoll <= poll:
            # Re-fetch the VM each iteration so the reported facts are fresh.
            newvm = self.getvm(uuid=vm.config.uuid)
            facts = self.gather_facts(newvm)
            if facts['ipv4'] or facts['ipv6']:
                ips = True
            else:
                time.sleep(sleep)
                thispoll += 1

        return facts
+
+
    def fetch_file_from_guest(self, vm, username, password, src, dest):

        ''' Use VMWare's filemanager api to fetch a file over http '''

        result = {'failed': False}

        # Guest operations require VMware tools running inside the guest.
        tools_status = vm.guest.toolsStatus
        if (tools_status == 'toolsNotInstalled' or
                tools_status == 'toolsNotRunning'):
            result['failed'] = True
            result['msg'] = "VMwareTools is not installed or is not running in the guest"
            return result

        # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
        creds = vim.vm.guest.NamePasswordAuthentication(
            username=username, password=password
        )

        # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/FileManager/FileTransferInformation.rst
        # Ask vCenter for a one-time download URL for the guest file.
        fti = self.content.guestOperationsManager.fileManager. \
            InitiateFileTransferFromGuest(vm, creds, src)

        result['size'] = fti.size
        result['url'] = fti.url

        # Use module_utils to fetch the remote url returned from the api
        rsp, info = fetch_url(self.module, fti.url, use_proxy=False,
                              force=True, last_mod_time=None,
                              timeout=10, headers=None)

        # save all of the transfer data (status, headers, ...)
        # NOTE: iteritems() is Python 2 only, consistent with this module.
        for k, v in info.iteritems():
            result[k] = v

        # exit early if xfer failed
        if info['status'] != 200:
            result['failed'] = True
            return result

        # attempt to read the content and write it
        try:
            with open(dest, 'wb') as f:
                f.write(rsp.read())
        except Exception as e:
            result['failed'] = True
            result['msg'] = str(e)

        return result
+
+
    def push_file_to_guest(self, vm, username, password, src, dest, overwrite=True):

        ''' Use VMWare's filemanager api to push a file over http '''

        result = {'failed': False}

        # Guest operations require VMware tools running inside the guest.
        tools_status = vm.guest.toolsStatus
        if (tools_status == 'toolsNotInstalled' or
                tools_status == 'toolsNotRunning'):
            result['failed'] = True
            result['msg'] = "VMwareTools is not installed or is not running in the guest"
            return result

        # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
        creds = vim.vm.guest.NamePasswordAuthentication(
            username=username, password=password
        )

        # the api requires a filesize in bytes
        filesize = None
        fdata = None
        try:
            #filesize = os.path.getsize(src)
            filesize = os.stat(src).st_size
            fdata = None
            with open(src, 'rb') as f:
                fdata = f.read()
            result['local_filesize'] = filesize
        except Exception as e:
            result['failed'] = True
            result['msg'] = "Unable to read src file: %s" % str(e)
            return result

        # https://www.vmware.com/support/developer/converter-sdk/conv60_apireference/vim.vm.guest.FileManager.html#initiateFileTransferToGuest
        # vCenter hands back a one-time upload URL for the guest path.
        file_attribute = vim.vm.guest.FileManager.FileAttributes()
        url = self.content.guestOperationsManager.fileManager. \
            InitiateFileTransferToGuest(vm, creds, dest, file_attribute,
                                        filesize, overwrite)

        # PUT the filedata to the url ...
        rsp, info = fetch_url(self.module, url, method="put", data=fdata,
                              use_proxy=False, force=True, last_mod_time=None,
                              timeout=10, headers=None)

        result['msg'] = str(rsp.read())

        # save all of the transfer data (status, headers, ...)
        # NOTE: iteritems() is Python 2 only, consistent with this module.
        for k, v in info.iteritems():
            result[k] = v

        return result
+
+
    def run_command_in_guest(self, vm, username, password, program_path, program_args, program_cwd, program_env):
        """Run a program inside the guest via VMware tools and wait for it.

        Returns a result dict with pid, owner, start/end times and exit
        code. NOTE(review): the program_env parameter is accepted but not
        passed to the ProgramSpec — environment customization is ignored.
        """
        result = {'failed': False}

        # Guest operations require VMware tools running inside the guest.
        tools_status = vm.guest.toolsStatus
        if (tools_status == 'toolsNotInstalled' or
                tools_status == 'toolsNotRunning'):
            result['failed'] = True
            result['msg'] = "VMwareTools is not installed or is not running in the guest"
            return result

        # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
        creds = vim.vm.guest.NamePasswordAuthentication(
            username=username, password=password
        )

        res = None
        pdata = None
        try:
            # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/ProcessManager.rst
            pm = self.content.guestOperationsManager.processManager
            # https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/vim.vm.guest.ProcessManager.ProgramSpec.html
            ps = vim.vm.guest.ProcessManager.ProgramSpec(
                #programPath=program,
                #arguments=args
                programPath=program_path,
                arguments=program_args,
                workingDirectory=program_cwd,
            )
            res = pm.StartProgramInGuest(vm, creds, ps)
            result['pid'] = res
            pdata = pm.ListProcessesInGuest(vm, creds, [res])

            # wait for pid to finish (endTime is unset while running)
            while not pdata[0].endTime:
                time.sleep(1)
                pdata = pm.ListProcessesInGuest(vm, creds, [res])
            result['owner'] = pdata[0].owner
            result['startTime'] = pdata[0].startTime.isoformat()
            result['endTime'] = pdata[0].endTime.isoformat()
            result['exitCode'] = pdata[0].exitCode
            if result['exitCode'] != 0:
                result['failed'] = True
                result['msg'] = "program exited non-zero"
            else:
                result['msg'] = "program completed successfully"

        except Exception as e:
            result['msg'] = str(e)
            result['failed'] = True

        return result
+
+ def list_snapshots_recursively(self, snapshots):
+ snapshot_data = []
+ snap_text = ''
+ for snapshot in snapshots:
+ snap_text = 'Id: %s; Name: %s; Description: %s; CreateTime: %s; State: %s'%(snapshot.id, snapshot.name,
+ snapshot.description, snapshot.createTime, snapshot.state)
+ snapshot_data.append(snap_text)
+ snapshot_data = snapshot_data + self.list_snapshots_recursively(snapshot.childSnapshotList)
+ return snapshot_data
+
+
+ def get_snapshots_by_name_recursively(self, snapshots, snapname):
+ snap_obj = []
+ for snapshot in snapshots:
+ if snapshot.name == snapname:
+ snap_obj.append(snapshot)
+ else:
+ snap_obj = snap_obj + self.get_snapshots_by_name_recursively(snapshot.childSnapshotList, snapname)
+ return snap_obj
+
+ def get_current_snap_obj(self, snapshots, snapob):
+ snap_obj = []
+ for snapshot in snapshots:
+ if snapshot.snapshot == snapob:
+ snap_obj.append(snapshot)
+ snap_obj = snap_obj + self.get_current_snap_obj(snapshot.childSnapshotList, snapob)
+ return snap_obj
+
    def snapshot_vm(self, vm, guest, snapshot_op):
        '''Perform a snapshot operation on *vm*.

        snapshot_op is a dict with mandatory key 'op_type'
        (create/remove/revert/list_all/list_current/remove_all); 'create',
        'remove' and 'revert' additionally need a 'name' key ('create' also
        accepts an optional 'description'). *guest* is the VM name, used
        only in messages. Returns a result dict; may instead terminate the
        module via exit_json/fail_json.
        '''

        # op_type is mandatory; everything else depends on the chosen operation.
        try:
            snapshot_op_name = snapshot_op['op_type']
        except KeyError:
            self.module.fail_json(msg="Specify op_type - create/remove/revert/list_all/list_current/remove_all")

        task = None
        result = {}

        if snapshot_op_name not in ['create', 'remove', 'revert', 'list_all', 'list_current', 'remove_all']:
            self.module.fail_json(msg="Specify op_type - create/remove/revert/list_all/list_current/remove_all")

        # Every operation except 'create' needs at least one existing snapshot.
        if snapshot_op_name != 'create' and vm.snapshot is None:
            self.module.exit_json(msg="VM - %s doesn't have any snapshots"%guest)

        if snapshot_op_name == 'create':
            try:
                snapname = snapshot_op['name']
            except KeyError:
                self.module.fail_json(msg="specify name & description(optional) to create a snapshot")

            if 'description' in snapshot_op:
                snapdesc = snapshot_op['description']
            else:
                snapdesc = ''

            # Memory dump and guest quiescing are intentionally disabled.
            dumpMemory = False
            quiesce = False
            task = vm.CreateSnapshot(snapname, snapdesc, dumpMemory, quiesce)

        elif snapshot_op_name in ['remove', 'revert']:
            try:
                snapname = snapshot_op['name']
            except KeyError:
                self.module.fail_json(msg="specify snapshot name")

            snap_obj = self.get_snapshots_by_name_recursively(vm.snapshot.rootSnapshotList, snapname)

            #if len(snap_obj) is 0; then no snapshots with specified name
            # Only act on an unambiguous single match.
            if len(snap_obj) == 1:
                snap_obj = snap_obj[0].snapshot
                if snapshot_op_name == 'remove':
                    # True removes the snapshot's children as well.
                    task = snap_obj.RemoveSnapshot_Task(True)
                else:
                    task = snap_obj.RevertToSnapshot_Task()
            else:
                self.module.exit_json(msg="Couldn't find any snapshots with specified name: %s on VM: %s"%(snapname, guest))

        elif snapshot_op_name == 'list_all':
            # Read-only: no vSphere task is started for list operations.
            snapshot_data = self.list_snapshots_recursively(vm.snapshot.rootSnapshotList)
            result['snapshot_data'] = snapshot_data

        elif snapshot_op_name == 'list_current':
            current_snapref = vm.snapshot.currentSnapshot
            current_snap_obj = self.get_current_snap_obj(vm.snapshot.rootSnapshotList, current_snapref)
            result['current_snapshot'] = 'Id: %s; Name: %s; Description: %s; CreateTime: %s; State: %s'%(current_snap_obj[0].id,
                current_snap_obj[0].name, current_snap_obj[0].description, current_snap_obj[0].createTime,
                current_snap_obj[0].state)

        elif snapshot_op_name == 'remove_all':
            task = vm.RemoveAllSnapshots()

        # Mutating operations set `task`; wait for it and report its outcome.
        if task:
            self.wait_for_task(task)
            if task.info.state == 'error':
                result = {'changed': False, 'failed': True, 'msg': task.info.error.msg}
            else:
                result = {'changed': True, 'failed': False}

        return result
+
def get_obj(content, vimtype, name):
    """
    Return an object by name, if name is None the
    first found object is returned
    """
    view = content.viewManager.CreateContainerView(
        content.rootFolder, vimtype, True)

    match = None
    for candidate in view.view:
        # A falsy name means "take the first object of this type".
        if not name or candidate.name == name:
            match = candidate
            break

    view.Destroy()
    return match
+
+
def main():
    """Entry point: find the VM by name/uuid and apply the requested state,
    power operation, snapshot operation, or gather facts."""

    module = AnsibleModule(
        argument_spec=dict(
            # Connection credentials fall back to environment variables so
            # playbooks do not need to embed them.
            hostname=dict(
                type='str',
                default=os.environ.get('VMWARE_HOST')
            ),
            username=dict(
                type='str',
                default=os.environ.get('VMWARE_USER')
            ),
            password=dict(
                type='str', no_log=True,
                default=os.environ.get('VMWARE_PASSWORD')
            ),
            state=dict(
                required=False,
                choices=[
                    'poweredon',
                    'poweredoff',
                    'present',
                    'absent',
                    'restarted',
                    'reconfigured'
                ],
                default='present'),
            validate_certs=dict(required=False, type='bool', default=True),
            template_src=dict(required=False, type='str', aliases=['template']),
            annotation=dict(required=False, type='str', aliases=['notes']),
            name=dict(required=True, type='str'),
            name_match=dict(required=False, type='str', default='first'),
            snapshot_op=dict(required=False, type='dict', default={}),
            uuid=dict(required=False, type='str'),
            # Dropped the redundant self-alias `aliases=['folder']`.
            folder=dict(required=False, type='str', default='/vm'),
            disk=dict(required=False, type='list'),
            nic=dict(required=False, type='list'),
            hardware=dict(required=False, type='dict', default={}),
            force=dict(required=False, type='bool', default=False),
            datacenter=dict(required=False, type='str', default=None),
            esxi_hostname=dict(required=False, type='str', default=None),
            cluster=dict(required=False, type='str', default=None),
            wait_for_ip_address=dict(required=False, type='bool', default=True),
            customize=dict(required=False, type='bool', default=False),
            ips=dict(required=False, type='str', default=None),
            dns_servers=dict(required=False, type='list', default=None),
            domain=dict(required=False, type='str', default=None),
            networks=dict(required=False, type='dict', default={})
        ),
        supports_check_mode=True,
        mutually_exclusive=[],
        # NOTE(review): both entries are effectively inert — 'state' and
        # 'force' always have defaults, and a single-item group is a no-op.
        # Kept as-is to avoid changing validation behavior.
        required_together=[
            ['state', 'force'],
            ['template'],
        ],
    )

    pyv = PyVmomiHelper(module)

    # Check if the VM exists before continuing
    vm = pyv.getvm(name=module.params['name'],
                   folder=module.params['folder'],
                   uuid=module.params['uuid'],
                   name_match=module.params['name_match'])

    if vm:
        # VM already exists
        if module.params['state'] == 'absent':
            if module.params['force']:
                # A VM must be powered off before it can be destroyed.
                pyv.set_powerstate(vm, 'poweredoff', module.params['force'])
                result = pyv.remove_vm(vm)
            else:
                # BUGFIX: this path previously fell through with `result`
                # unbound and crashed with NameError; fail cleanly instead.
                module.fail_json(
                    msg="VM '%s' exists; set force: yes to power it off and remove it"
                        % module.params['name'])
        elif module.params['state'] in ['poweredon', 'poweredoff', 'restarted']:
            # Apply the requested power state.
            result = pyv.set_powerstate(vm, module.params['state'], module.params['force'])
        elif module.params['snapshot_op']:
            result = pyv.snapshot_vm(vm, module.params['name'], module.params['snapshot_op'])
        else:
            # Run for facts only
            try:
                module.exit_json(instance=pyv.gather_facts(vm))
            except Exception:
                e = get_exception()
                module.fail_json(
                    msg="Fact gather failed with exception %s" % e)

    else:
        # VM doesn't exist: create it for any state implying existence.
        create_states = ['poweredon', 'poweredoff', 'present', 'restarted']
        if module.params['state'] in create_states:
            # 'poweredoff' means create but leave powered down.
            poweron = (module.params['state'] != 'poweredoff')
            result = pyv.deploy_template(
                poweron=poweron,
                wait_for_ip=module.params['wait_for_ip_address']
            )
            result['changed'] = True
        else:
            # 'absent' (or anything else) on a missing VM is a no-op.
            result = {'changed': False, 'failed': False}

    # Some code paths above do not set 'failed'; normalize before exiting.
    if 'failed' not in result:
        result['failed'] = False

    if result['failed']:
        module.fail_json(**result)
    else:
        module.exit_json(**result)
+
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_host.py b/lib/ansible/modules/cloud/vmware/vmware_host.py
new file mode 100644
index 0000000000..22cb82d55d
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_host.py
@@ -0,0 +1,229 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_host
+short_description: Add/remove ESXi host to/from vCenter
+description:
+ - This module can be used to add/remove an ESXi host to/from vCenter
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ datacenter_name:
+ description:
+ - Name of the datacenter to add the host
+ required: True
+ cluster_name:
+ description:
+ - Name of the cluster to add the host
+ required: True
+ esxi_hostname:
+ description:
+ - ESXi hostname to manage
+ required: True
+ esxi_username:
+ description:
+ - ESXi username
+ required: True
+ esxi_password:
+ description:
+ - ESXi password
+ required: True
+ state:
+ description:
+ - Add or remove the host
+ default: 'present'
+ choices:
+ - 'present'
+ - 'absent'
+ required: False
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+Example from Ansible playbook
+
+ - name: Add ESXi Host to VCSA
+ local_action:
+ module: vmware_host
+ hostname: vcsa_host
+ username: vcsa_user
+ password: vcsa_pass
+ datacenter_name: datacenter_name
+ cluster_name: cluster_name
+ esxi_hostname: esxi_hostname
+ esxi_username: esxi_username
+ esxi_password: esxi_password
+ state: present
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
class VMwareHost(object):
    """Add or remove an ESXi host in a vCenter cluster via the vSphere API."""

    def __init__(self, module):
        self.module = module
        self.datacenter_name = module.params['datacenter_name']
        self.cluster_name = module.params['cluster_name']
        self.esxi_hostname = module.params['esxi_hostname']
        self.esxi_username = module.params['esxi_username']
        self.esxi_password = module.params['esxi_password']
        self.state = module.params['state']
        # Resolved lazily by find_host_by_cluster_datacenter()/check_host_state().
        self.dc = None
        self.cluster = None
        self.host = None
        self.content = connect_to_api(module)

    def process_state(self):
        """Dispatch on (desired state, current state); each handler exits the module."""
        try:
            # NOTE(review): state_update_host exists below but is not wired
            # into this table, so host updates are currently never performed.
            host_states = {
                'absent': {
                    'present': self.state_remove_host,
                    'absent': self.state_exit_unchanged,
                },
                'present': {
                    'present': self.state_exit_unchanged,
                    'absent': self.state_add_host,
                }
            }

            host_states[self.state][self.check_host_state()]()

        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            self.module.fail_json(msg=str(e))

    def find_host_by_cluster_datacenter(self):
        """Return (host, cluster); host is None when not yet a cluster member."""
        self.dc = find_datacenter_by_name(self.content, self.datacenter_name)
        self.cluster = find_cluster_by_name_datacenter(self.dc, self.cluster_name)

        for host in self.cluster.host:
            if host.name == self.esxi_hostname:
                return host, self.cluster

        return None, self.cluster

    def add_host_to_vcenter(self):
        """Connect the ESXi host to the cluster, retrying once with the SSL
        thumbprint reported by the first failed attempt.

        Returns (success, result) from wait_for_task.
        """
        host_connect_spec = vim.host.ConnectSpec()
        host_connect_spec.hostName = self.esxi_hostname
        host_connect_spec.userName = self.esxi_username
        host_connect_spec.password = self.esxi_password
        host_connect_spec.force = True
        # An empty thumbprint makes the first attempt fail with a fault that
        # carries the host's real thumbprint (see except block below).
        host_connect_spec.sslThumbprint = ""
        as_connected = True
        esxi_license = None
        resource_pool = None

        try:
            task = self.cluster.AddHost_Task(host_connect_spec, as_connected, resource_pool, esxi_license)
            success, result = wait_for_task(task)
            return success, result
        except TaskError as add_task_error:
            # This is almost certain to fail the first time.
            # In order to get the sslThumbprint we first connect
            # get the vim.fault.SSLVerifyFault then grab the sslThumbprint
            # from that object.
            #
            # args is a tuple, selecting the first tuple
            ssl_verify_fault = add_task_error.args[0]
            host_connect_spec.sslThumbprint = ssl_verify_fault.thumbprint

            task = self.cluster.AddHost_Task(host_connect_spec, as_connected, resource_pool, esxi_license)
            success, result = wait_for_task(task)
            return success, result

    def state_exit_unchanged(self):
        """Current state already matches the request; report no change."""
        self.module.exit_json(changed=False)

    def state_remove_host(self):
        """Enter maintenance mode (300s timeout, evacuate) then destroy the host object."""
        changed = True
        result = None
        if not self.module.check_mode:
            if not self.host.runtime.inMaintenanceMode:
                maintenance_mode_task = self.host.EnterMaintenanceMode_Task(300, True, None)
                changed, result = wait_for_task(maintenance_mode_task)

            # Only destroy once maintenance mode succeeded (or was already set).
            if changed:
                task = self.host.Destroy_Task()
                changed, result = wait_for_task(task)
            else:
                raise Exception(result)
        self.module.exit_json(changed=changed, result=str(result))

    def state_update_host(self):
        """Placeholder: updating an existing host is not implemented."""
        self.module.exit_json(changed=False, msg="Currently not implemented.")

    def state_add_host(self):
        """Add the host to the cluster (no-op under check mode)."""
        changed = True
        result = None

        if not self.module.check_mode:
            changed, result = self.add_host_to_vcenter()
        self.module.exit_json(changed=changed, result=str(result))

    def check_host_state(self):
        """Return 'present' when the host is already in the cluster, else 'absent'."""
        self.host, self.cluster = self.find_host_by_cluster_datacenter()

        if self.host is None:
            return 'absent'
        else:
            return 'present'
+
+
def main():
    """Entry point: build the argument spec, check pyVmomi, run the state machine."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            datacenter_name=dict(required=True, type='str'),
            cluster_name=dict(required=True, type='str'),
            esxi_hostname=dict(required=True, type='str'),
            esxi_username=dict(required=True, type='str'),
            esxi_password=dict(required=True, type='str', no_log=True),
            state=dict(default='present', choices=['present', 'absent'], type='str'),
        )
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    VMwareHost(module).process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_local_user_manager.py b/lib/ansible/modules/cloud/vmware/vmware_local_user_manager.py
new file mode 100644
index 0000000000..ac52b57465
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_local_user_manager.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright IBM Corp. 2016
+# Author(s): Andreas Nafpliotis <nafpliot@de.ibm.com>
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_local_user_manager
+short_description: Manage local users on an ESXi host
+description:
+ - Manage local users on an ESXi host
+version_added: "2.2"
+author: Andreas Nafpliotis
+notes:
+ - Tested on ESXi 6.0
+ - Be sure that the ESXi user used for login, has the appropriate rights to create / delete / edit users
+requirements:
+ - "python >= 2.6"
+ - PyVmomi installed
+options:
+ local_user_name:
+ description:
+ - The local user name to be changed
+ required: True
+ local_user_password:
+ description:
+ - The password to be set
+ required: False
+ local_user_description:
+ description:
+ - Description for the user
+ required: False
+ state:
+ description:
+ - Indicate desired state of the user. If the user already exists when C(state=present), the user info is updated
+ choices: ['present', 'absent']
+ default: present
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example vmware_local_user_manager command from Ansible Playbooks
+- name: Add local user to ESXi
+ local_action:
+ module: vmware_local_user_manager
+ hostname: esxi_hostname
+ username: root
+ password: vmware
+ local_user_name: foo
+'''
+
+RETURN = '''# '''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
class VMwareLocalUserManager(object):
    """Create, update or remove a local user account on an ESXi host."""

    def __init__(self, module):
        self.module = module
        self.content = connect_to_api(self.module)
        self.local_user_name = self.module.params['local_user_name']
        self.local_user_password = self.module.params['local_user_password']
        self.local_user_description = self.module.params['local_user_description']
        self.state = self.module.params['state']

    def process_state(self):
        """Dispatch to the handler for (desired state, current state); exits the module."""
        try:
            local_account_manager_states = {
                'absent': {
                    'present': self.state_remove_user,
                    'absent': self.state_exit_unchanged,
                },
                'present': {
                    'present': self.state_update_user,
                    'absent': self.state_create_user,
                }
            }

            local_account_manager_states[self.state][self.check_local_user_manager_state()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            self.module.fail_json(msg=str(e))

    def check_local_user_manager_state(self):
        """Return 'present' when the user already exists on the host, else 'absent'."""
        user_account = self.find_user_account()
        if not user_account:
            return 'absent'
        else:
            return 'present'

    def find_user_account(self):
        """Look up the exact user name (users only, no groups) in the host directory."""
        searchStr = self.local_user_name
        exactMatch = True
        findUsers = True
        findGroups = False
        return self.content.userDirectory.RetrieveUserGroups(
            None, searchStr, None, None, exactMatch, findUsers, findGroups)

    def create_account_spec(self):
        """Build the AccountSpecification shared by create and update."""
        account_spec = vim.host.LocalAccountManager.AccountSpecification()
        account_spec.id = self.local_user_name
        account_spec.password = self.local_user_password
        account_spec.description = self.local_user_description
        return account_spec

    def _run_account_operation(self, operation):
        # Shared wrapper: the three state-changing calls had identical
        # success/fault handling. CreateUser/UpdateUser/RemoveUser return
        # nothing useful, so no value is captured (the original bound an
        # unused `task` local on every call).
        try:
            operation()
            self.module.exit_json(changed=True)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)

    def state_create_user(self):
        """Create the user; always reports changed=True."""
        account_spec = self.create_account_spec()
        self._run_account_operation(
            lambda: self.content.accountManager.CreateUser(account_spec))

    def state_update_user(self):
        """Update password/description; reports changed=True even when values are identical."""
        account_spec = self.create_account_spec()
        self._run_account_operation(
            lambda: self.content.accountManager.UpdateUser(account_spec))

    def state_remove_user(self):
        """Remove the user from the host."""
        self._run_account_operation(
            lambda: self.content.accountManager.RemoveUser(self.local_user_name))

    def state_exit_unchanged(self):
        """No action needed for the requested state."""
        self.module.exit_json(changed=False)
+
+
+
def main():
    """Entry point: build the argument spec, check pyVmomi, run the state machine."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            local_user_name=dict(required=True, type='str'),
            local_user_password=dict(required=False, type='str', no_log=True),
            local_user_description=dict(required=False, type='str'),
            state=dict(default='present', choices=['present', 'absent'], type='str'),
        )
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    VMwareLocalUserManager(module).process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_maintenancemode.py b/lib/ansible/modules/cloud/vmware/vmware_maintenancemode.py
new file mode 100644
index 0000000000..54e8958900
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_maintenancemode.py
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, VMware, Inc.
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_maintenancemode
+short_description: Place a host into maintenance mode
+description:
+ - Place an ESXI host into maintenance mode
+ - Support for VSAN compliant maintenance mode when selected
+author: "Jay Jahns <jjahns@vmware.com>"
+version_added: "2.1"
+notes:
+ - Tested on vSphere 5.5 and 6.0
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ esxi_hostname:
+ description:
+ - Name of the host as defined in vCenter
+ required: True
+  vsan:
+ description:
+ - Specify which VSAN compliant mode to enter
+ choices:
+ - 'ensureObjectAccessibility'
+ - 'evacuateAllData'
+ - 'noAction'
+ required: False
+ evacuate:
+ description:
+ - If True, evacuate all powered off VMs
+ choices:
+ - True
+ - False
+ default: False
+ required: False
+ timeout:
+ description:
+ - Specify a timeout for the operation
+ required: False
+ default: 0
+ state:
+ description:
+ - Enter or exit maintenance mode
+ choices:
+ - present
+ - absent
+ default: present
+ required: False
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+- name: Enter VSAN-Compliant Maintenance Mode
+ local_action:
+ module: vmware_maintenancemode
+ hostname: vc_host
+ username: vc_user
+ password: vc_pass
+ esxi_hostname: esxi.host.example
+ vsan: ensureObjectAccessibility
+ evacuate: yes
+ timeout: 3600
+ state: present
+'''
# BUGFIX: the `status` field used the invalid key `return:`; the Ansible
# RETURN schema key is `returned:` (as the other two fields already use).
RETURN = '''
hostsystem:
    description: Name of vim reference
    returned: always
    type: string
    sample: "'vim.HostSystem:host-236'"
hostname:
    description: Name of host in vCenter
    returned: always
    type: string
    sample: "esxi.local.domain"
status:
    description: Action taken
    returned: always
    type: string
    sample: "ENTER"
'''
+
+try:
+ from pyVmomi import vim
+ HAS_PYVMOMI = True
+
+except ImportError:
+ HAS_PYVMOMI = False
+
+
def EnterMaintenanceMode(module, host):
    """Put *host* into maintenance mode, honoring an optional VSAN decommission mode.

    Exits the module unchanged when the host is already in maintenance mode;
    otherwise returns a result dict for the caller to pass to exit_json.
    """
    # Idempotence: nothing to do when already in maintenance mode.
    if host.runtime.inMaintenanceMode:
        module.exit_json(
            changed=False,
            hostsystem=str(host),
            hostname=module.params['esxi_hostname'],
            status='NO_ACTION',
            msg='Host already in maintenance mode')

    spec = vim.host.MaintenanceSpec()

    vsan_mode = module.params['vsan']
    if vsan_mode:
        spec.vsanMode = vim.vsan.host.DecommissionMode()
        spec.vsanMode.objectAction = vsan_mode

    try:
        task = host.EnterMaintenanceMode_Task(
            module.params['timeout'],
            module.params['evacuate'],
            spec)
        success, result = wait_for_task(task)
    except TaskError:
        module.fail_json(
            msg='Host failed to enter maintenance mode')

    return dict(changed=success,
                hostsystem=str(host),
                hostname=module.params['esxi_hostname'],
                status='ENTER',
                msg='Host entered maintenance mode')
+
+
def ExitMaintenanceMode(module, host):
    """Take *host* out of maintenance mode.

    Exits the module unchanged when the host is not in maintenance mode;
    otherwise returns a result dict for the caller to pass to exit_json.
    """
    # Idempotence: nothing to do when not in maintenance mode.
    if not host.runtime.inMaintenanceMode:
        module.exit_json(
            changed=False,
            hostsystem=str(host),
            hostname=module.params['esxi_hostname'],
            status='NO_ACTION',
            msg='Host not in maintenance mode')

    try:
        task = host.ExitMaintenanceMode_Task(
            module.params['timeout'])
        success, result = wait_for_task(task)
    except TaskError:
        module.fail_json(
            msg='Host failed to exit maintenance mode')

    return dict(changed=success,
                hostsystem=str(host),
                hostname=module.params['esxi_hostname'],
                status='EXIT',
                msg='Host exited maintenance mode')
+
+
def main():
    """Entry point: locate the ESXi host and enter or exit maintenance mode."""
    spec = vmware_argument_spec()
    spec.update(dict(
        esxi_hostname=dict(required=True),
        vsan=dict(required=False, choices=['ensureObjectAccessibility',
                                           'evacuateAllData',
                                           'noAction']),
        evacuate=dict(required=False, type='bool', default=False),
        timeout=dict(required=False, default=0, type='int'),
        state=dict(required=False,
                   default='present',
                   choices=['present', 'absent'])))

    module = AnsibleModule(argument_spec=spec)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    content = connect_to_api(module)
    host = find_hostsystem_by_name(content, module.params['esxi_hostname'])
    if not host:
        module.fail_json(
            msg='Host not found in vCenter')

    # `state` is restricted to present/absent by the argument spec.
    if module.params['state'] == 'present':
        result = EnterMaintenanceMode(module, host)
    else:
        result = ExitMaintenanceMode(module, host)

    module.exit_json(**result)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.vmware import *
+
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_migrate_vmk.py b/lib/ansible/modules/cloud/vmware/vmware_migrate_vmk.py
new file mode 100644
index 0000000000..730102c204
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_migrate_vmk.py
@@ -0,0 +1,200 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_migrate_vmk
+short_description: Migrate a VMK interface from VSS to VDS
+description:
+ - Migrate a VMK interface from VSS to VDS
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ esxi_hostname:
+ description:
+ - ESXi hostname to be managed
+ required: True
+ device:
+ description:
+ - VMK interface name
+ required: True
+ current_switch_name:
+ description:
+ - Switch VMK interface is currently on
+ required: True
+ current_portgroup_name:
+ description:
+ - Portgroup name VMK interface is currently on
+ required: True
+ migrate_switch_name:
+ description:
+ - Switch name to migrate VMK interface to
+ required: True
+ migrate_portgroup_name:
+ description:
+ - Portgroup name to migrate VMK interface to
+ required: True
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+Example from Ansible playbook
+
+ - name: Migrate Management vmk
+ local_action:
+ module: vmware_migrate_vmk
+ hostname: vcsa_host
+ username: vcsa_user
+ password: vcsa_pass
+ esxi_hostname: esxi_hostname
+ device: vmk1
+ current_switch_name: temp_vswitch
+ current_portgroup_name: esx-mgmt
+ migrate_switch_name: dvSwitch
+ migrate_portgroup_name: Management
+'''
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
class VMwareMigrateVmk(object):
    """Migrate a VMK interface between a standard vSwitch (VSS) and a
    distributed vSwitch (VDS)."""

    def __init__(self, module):
        self.module = module
        self.host_system = None
        self.migrate_switch_name = self.module.params['migrate_switch_name']
        self.migrate_portgroup_name = self.module.params['migrate_portgroup_name']
        self.device = self.module.params['device']
        self.esxi_hostname = self.module.params['esxi_hostname']
        self.current_portgroup_name = self.module.params['current_portgroup_name']
        self.current_switch_name = self.module.params['current_switch_name']
        self.content = connect_to_api(module)

    def process_state(self):
        """Dispatch on the detected migration state; each handler exits the module."""
        try:
            vmk_migration_states = {
                'migrate_vss_vds': self.state_migrate_vss_vds,
                'migrate_vds_vss': self.state_migrate_vds_vss,
                'migrated': self.state_exit_unchanged
            }

            vmk_migration_states[self.check_vmk_current_state()]()

        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            self.module.fail_json(msg=str(e))

    def state_exit_unchanged(self):
        """Interface is already where it should be."""
        self.module.exit_json(changed=False)

    def state_migrate_vds_vss(self):
        """Placeholder: DVS -> VSS migration is not implemented."""
        self.module.exit_json(changed=False, msg="Currently Not Implemented")

    def create_host_vnic_config(self, dv_switch_uuid, portgroup_key):
        """Build the vnic edit that repoints self.device at the DVS portgroup."""
        host_vnic_config = vim.host.VirtualNic.Config()
        host_vnic_config.spec = vim.host.VirtualNic.Specification()

        host_vnic_config.changeOperation = "edit"
        host_vnic_config.device = self.device
        # Clearing the legacy portgroup completes the move off the vSwitch.
        host_vnic_config.portgroup = ""
        host_vnic_config.spec.distributedVirtualPort = vim.dvs.PortConnection()
        host_vnic_config.spec.distributedVirtualPort.switchUuid = dv_switch_uuid
        host_vnic_config.spec.distributedVirtualPort.portgroupKey = portgroup_key

        return host_vnic_config

    def create_port_group_config(self):
        """Build the config that removes the old standard-switch portgroup."""
        port_group_config = vim.host.PortGroup.Config()
        port_group_config.spec = vim.host.PortGroup.Specification()

        port_group_config.changeOperation = "remove"
        port_group_config.spec.name = self.current_portgroup_name
        port_group_config.spec.vlanId = -1
        port_group_config.spec.vswitchName = self.current_switch_name
        port_group_config.spec.policy = vim.host.NetworkPolicy()

        return port_group_config

    def state_migrate_vss_vds(self):
        """Move the VMK from the standard vSwitch onto the DVS in a single
        UpdateNetworkConfig call (portgroup removal + vnic edit together)."""
        host_network_system = self.host_system.configManager.networkSystem

        dv_switch = find_dvs_by_name(self.content, self.migrate_switch_name)
        pg = find_dvspg_by_name(dv_switch, self.migrate_portgroup_name)

        config = vim.host.NetworkConfig()
        config.portgroup = [self.create_port_group_config()]
        config.vnic = [self.create_host_vnic_config(dv_switch.uuid, pg.key)]
        host_network_system.UpdateNetworkConfig(config, "modify")
        self.module.exit_json(changed=True)

    def check_vmk_current_state(self):
        """Classify self.device as 'migrate_vss_vds', 'migrate_vds_vss' or 'migrated'."""
        self.host_system = find_hostsystem_by_name(self.content, self.esxi_hostname)

        for vnic in self.host_system.configManager.networkSystem.networkInfo.vnic:
            if vnic.device == self.device:
                if vnic.spec.distributedVirtualPort is None:
                    # Device sits on a standard vSwitch; only migrate when it
                    # is on the portgroup the user said it is currently on.
                    if vnic.portgroup == self.current_portgroup_name:
                        return "migrate_vss_vds"
                else:
                    dvs = find_dvs_by_name(self.content, self.current_switch_name)
                    if dvs is None:
                        # current_switch_name is not a DVS, so the device is
                        # already on some DVS: treat as migrated.
                        return "migrated"
                    if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid:
                        return "migrate_vds_vss"

        # BUGFIX: falling off the loop previously returned None, which
        # surfaced as a cryptic KeyError("None") from the dispatch dict in
        # process_state. Raise a descriptive error instead (process_state
        # converts it into a clean fail_json message).
        raise Exception("Unable to determine migration state of device %s on host %s"
                        % (self.device, self.esxi_hostname))
+
+
def main():
    """Module entry point: validate arguments and run the migration state machine."""

    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(esxi_hostname=dict(required=True, type='str'),
                              device=dict(required=True, type='str'),
                              current_switch_name=dict(required=True, type='str'),
                              current_portgroup_name=dict(required=True, type='str'),
                              migrate_switch_name=dict(required=True, type='str'),
                              migrate_portgroup_name=dict(required=True, type='str')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        # BUGFIX: was `self.module.fail_json(...)` — there is no `self` in
        # this module-level function, so a missing pyVmomi raised NameError
        # instead of the intended clean failure message.
        module.fail_json(msg='pyvmomi required for this module')

    vmware_migrate_vmk = VMwareMigrateVmk(module)
    vmware_migrate_vmk.process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_portgroup.py b/lib/ansible/modules/cloud/vmware/vmware_portgroup.py
new file mode 100644
index 0000000000..089d584d03
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_portgroup.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_portgroup
+short_description: Create a VMware portgroup
+description:
+ - Create a VMware portgroup
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ switch_name:
+ description:
+ - vSwitch to modify
+ required: True
+ portgroup_name:
+ description:
+ - Portgroup name to add
+ required: True
+ vlan_id:
+ description:
+ - VLAN ID to assign to portgroup
+ required: True
+ network_policy:
+ description:
+ - Network policy specifies layer 2 security settings for a
+ portgroup such as promiscuous mode, where guest adapter listens
+ to all the packets, MAC address changes and forged transmits.
+ Settings are promiscuous_mode, forged_transmits, mac_changes
+ required: False
+ version_added: "2.2"
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+Example from Ansible playbook
+
+ - name: Add Management Network VM Portgroup
+ local_action:
+ module: vmware_portgroup
+ hostname: esxi_hostname
+ username: esxi_username
+ password: esxi_password
+ switch_name: vswitch_name
+ portgroup_name: portgroup_name
+ vlan_id: vlan_id
+
+ - name: Add Portgroup with Promiscuous Mode Enabled
+ local_action:
+ module: vmware_portgroup
+ hostname: esxi_hostname
+ username: esxi_username
+ password: esxi_password
+ switch_name: vswitch_name
+ portgroup_name: portgroup_name
+ network_policy:
+ promiscuous_mode: True
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
def create_network_policy(promiscuous_mode, forged_transmits, mac_changes):
    """Build a vim.host.NetworkPolicy with the requested layer-2 security settings.

    Each argument may be True, False, or None. None means "leave the host
    default unset", while an explicit False must be pushed to the host.
    """
    security_policy = vim.host.NetworkPolicy.SecurityPolicy()
    # Bug fix: the previous truthiness checks (`if promiscuous_mode:`)
    # silently dropped explicit False values, so a user could never force a
    # setting off. Distinguish "not supplied" (None) from False.
    if promiscuous_mode is not None:
        security_policy.allowPromiscuous = promiscuous_mode
    if forged_transmits is not None:
        security_policy.forgedTransmits = forged_transmits
    if mac_changes is not None:
        security_policy.macChanges = mac_changes
    network_policy = vim.host.NetworkPolicy(security=security_policy)
    return network_policy
+
+
def create_port_group(host_system, portgroup_name, vlan_id, vswitch_name, network_policy):
    """Add a portgroup named portgroup_name to vswitch_name on host_system.

    Always returns True; UpdateNetworkConfig raises on failure.
    """
    portgroup_spec = vim.host.PortGroup.Specification()
    portgroup_spec.name = portgroup_name
    portgroup_spec.vlanId = vlan_id
    portgroup_spec.vswitchName = vswitch_name
    portgroup_spec.policy = network_policy

    portgroup_config = vim.host.PortGroup.Config()
    portgroup_config.changeOperation = "add"
    portgroup_config.spec = portgroup_spec

    config = vim.host.NetworkConfig()
    config.portgroup = [portgroup_config]

    host_system.configManager.networkSystem.UpdateNetworkConfig(config, "modify")
    return True
+
+
def main():
    """Entry point: create the requested standard-vSwitch portgroup if absent."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(portgroup_name=dict(required=True, type='str'),
                              switch_name=dict(required=True, type='str'),
                              vlan_id=dict(required=True, type='int'),
                              network_policy=dict(required=False, type='dict', default={})))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    portgroup_name = module.params['portgroup_name']
    switch_name = module.params['switch_name']
    vlan_id = module.params['vlan_id']
    # Optional layer-2 security overrides; absent keys leave host defaults.
    promiscuous_mode = module.params['network_policy'].get('promiscuous_mode', None)
    forged_transmits = module.params['network_policy'].get('forged_transmits', None)
    mac_changes = module.params['network_policy'].get('mac_changes', None)

    try:
        content = connect_to_api(module)
        host = get_all_objs(content, [vim.HostSystem])
        if not host:
            # Bug fix: raising SystemExit bypassed Ansible's JSON error
            # protocol; report through fail_json like every other branch.
            module.fail_json(msg="Unable to locate Physical Host.")
        # Bug fix: dict.keys() is a non-subscriptable view on Python 3;
        # materialize it before taking the first host.
        host_system = list(host.keys())[0]

        if find_host_portgroup_by_name(host_system, portgroup_name):
            module.exit_json(changed=False)

        network_policy = create_network_policy(promiscuous_mode, forged_transmits, mac_changes)
        changed = create_port_group(host_system, portgroup_name, vlan_id, switch_name, network_policy)

        module.exit_json(changed=changed)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
    except Exception as e:
        module.fail_json(msg=str(e))
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_target_canonical_facts.py b/lib/ansible/modules/cloud/vmware/vmware_target_canonical_facts.py
new file mode 100644
index 0000000000..817d736d3a
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_target_canonical_facts.py
@@ -0,0 +1,99 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_target_canonical_facts
+short_description: Return canonical (NAA) from an ESXi host
+description:
+ - Return canonical (NAA) from an ESXi host based on SCSI target ID
+version_added: "2.0"
+author: Joseph Callen
+notes:
+    - Tested on vSphere 5.5
+requirements:
+    - PyVmomi installed
+options:
+ target_id:
+ description:
+ - The target id based on order of scsi device
+ required: True
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example vmware_target_canonical_facts command from Ansible Playbooks
+- name: Get Canonical name
+ local_action: >
+ vmware_target_canonical_facts
+ hostname="{{ ansible_ssh_host }}" username=root password=vmware
+ target_id=7
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
def find_hostsystem(content):
    """Return the first HostSystem in the inventory, or None when there is none."""
    return next(iter(get_all_objs(content, [vim.HostSystem])), None)
+
+
def main():
    """Report the canonical (NAA) name for a SCSI target id on the first host found."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(target_id=dict(required=True, type='int')))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    content = connect_to_api(module)
    host = find_hostsystem(content)

    target_lun_uuid = {}
    scsilun_canonical = {}

    # Associate the scsiLun key with the canonicalName (NAA)
    for scsilun in host.config.storageDevice.scsiLun:
        scsilun_canonical[scsilun.key] = scsilun.canonicalName

    # Associate target number with LUN uuid
    # NOTE(review): only adapter[0] is inspected, and the inner loop keeps
    # the last LUN of each target -- presumably one LUN per target; confirm.
    for target in host.config.storageDevice.scsiTopology.adapter[0].target:
        for lun in target.lun:
            target_lun_uuid[target.target] = lun.scsiLun

    # An unknown target_id raises KeyError here; there is no fail_json guard
    # in this version, so it surfaces as a module crash.
    module.exit_json(changed=False, canonical=scsilun_canonical[target_lun_uuid[module.params['target_id']]])
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.vmware import *
+
+if __name__ == '__main__':
+ main()
+
diff --git a/lib/ansible/modules/cloud/vmware/vmware_vm_facts.py b/lib/ansible/modules/cloud/vmware/vmware_vm_facts.py
new file mode 100644
index 0000000000..46de7a3915
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_vm_facts.py
@@ -0,0 +1,105 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_vm_facts
+short_description: Return basic facts pertaining to a vSphere virtual machine guest
+description:
+ - Return basic facts pertaining to a vSphere virtual machine guest
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+- name: Gather all registered virtual machines
+ local_action:
+ module: vmware_vm_facts
+ hostname: esxi_or_vcenter_ip_or_hostname
+ username: username
+ password: password
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/getallvms.py
def get_all_virtual_machines(content):
    """Return a dict keyed by VM name with guest OS, power state and IP facts."""
    facts = {}
    for vm in get_all_objs(content, [vim.VirtualMachine]):
        summary = vm.summary
        ip_address = ""
        if summary.guest is not None and summary.guest.ipAddress is not None:
            ip_address = summary.guest.ipAddress

        facts[summary.config.name] = {
            "guest_fullname": summary.config.guestFullName,
            "power_state": summary.runtime.powerState,
            "ip_address": ip_address
        }
    return facts
+
+
def main():
    """Connect to vSphere and report facts about every registered VM."""
    argument_spec = vmware_argument_spec()
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    try:
        content = connect_to_api(module)
        vm_facts = get_all_virtual_machines(content)
        module.exit_json(changed=False, virtual_machines=vm_facts)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
    except Exception as e:
        module.fail_json(msg=str(e))
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_vm_shell.py b/lib/ansible/modules/cloud/vmware/vmware_vm_shell.py
new file mode 100644
index 0000000000..34eb6b0f44
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_vm_shell.py
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, 2016 Ritesh Khadgaray <khadgaray () gmail.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_vm_shell
+short_description: Execute a process in VM
+description:
+ - Start a program in a VM without the need for network connection
+version_added: 2.1
+author: "Ritesh Khadgaray (@ritzk)"
+notes:
+ - Tested on vSphere 5.5
+ - Only the first match against vm_id is used, even if there are multiple matches
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ datacenter:
+ description:
+ - The datacenter hosting the VM
+ - Will help speed up search
+ required: False
+ default: None
+ cluster:
+ description:
+ - The cluster hosting the VM
+ - Will help speed up search
+ required: False
+ default: None
+ vm_id:
+ description:
+ - The identification for the VM
+ required: True
+ vm_id_type:
+ description:
+ - The identification tag for the VM
+ default: vm_name
+ choices:
+ - 'uuid'
+ - 'dns_name'
+ - 'inventory_path'
+ - 'vm_name'
+ required: False
+ vm_username:
+ description:
+ - The user to connect to the VM.
+ required: False
+ default: None
+ vm_password:
+ description:
+ - The password used to login to the VM.
+ required: False
+ default: None
+ vm_shell:
+ description:
+ - The absolute path to the program to start. On Linux this is executed via bash.
+ required: True
+ vm_shell_args:
+ description:
+ - The argument to the program.
+ required: False
+ default: None
+ vm_shell_env:
+ description:
+      - Comma separated list of environment variables, specified in the guest OS notation
+ required: False
+ default: None
+ vm_shell_cwd:
+ description:
+ - The current working directory of the application from which it will be run
+ required: False
+ default: None
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+ - name: shell execution
+ local_action:
+ module: vmware_vm_shell
+ hostname: myVSphere
+ username: myUsername
+ password: mySecret
+ datacenter: myDatacenter
+ vm_id: NameOfVM
+ vm_username: root
+ vm_password: superSecret
+ vm_shell: /bin/echo
+ vm_shell_args: " $var >> myFile "
+ vm_shell_env:
+ - "PATH=/bin"
+ - "VAR=test"
+ vm_shell_cwd: "/tmp"
+
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
# https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/execute_program_in_vm.py
def execute_command(content, vm, vm_username, vm_password, program_path, args="", env=None, cwd=None):
    """Start program_path inside vm as vm_username and return the guest PID."""
    auth = vim.vm.guest.NamePasswordAuthentication(username=vm_username,
                                                   password=vm_password)
    spec = vim.vm.guest.ProcessManager.ProgramSpec(programPath=program_path,
                                                   arguments=args,
                                                   envVariables=env,
                                                   workingDirectory=cwd)
    process_manager = content.guestOperationsManager.processManager
    return process_manager.StartProgramInGuest(vm=vm, auth=auth, spec=spec)
+
def main():
    """Run a program inside a guest VM via VMware Tools guest operations."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(datacenter=dict(default=None, type='str'),
                              cluster=dict(default=None, type='str'),
                              vm_id=dict(required=True, type='str'),
                              vm_id_type=dict(default='vm_name', type='str', choices=['inventory_path', 'uuid', 'dns_name', 'vm_name']),
                              vm_username=dict(required=False, type='str'),
                              vm_password=dict(required=False, type='str', no_log=True),
                              vm_shell=dict(required=True, type='str'),
                              vm_shell_args=dict(default=" ", type='str'),
                              vm_shell_env=dict(default=None, type='list'),
                              vm_shell_cwd=dict(default=None, type='str')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(changed=False, msg='pyvmomi is required for this module')


    try:
        p = module.params
        datacenter_name = p['datacenter']
        cluster_name = p['cluster']
        content = connect_to_api(module)

        # Datacenter and cluster are optional; when supplied they narrow the
        # VM search scope, and failing to resolve them is a hard error.
        datacenter = None
        if datacenter_name:
            datacenter = find_datacenter_by_name(content, datacenter_name)
            if not datacenter:
                module.fail_json(changed=False, msg="datacenter not found")

        cluster = None
        if cluster_name:
            cluster = find_cluster_by_name(content, cluster_name, datacenter)
            if not cluster:
                module.fail_json(changed=False, msg="cluster not found")

        vm = find_vm_by_id(content, p['vm_id'], p['vm_id_type'], datacenter, cluster)
        if not vm:
            module.fail_json(msg='VM not found')

        # `msg` carries the guest PID returned by StartProgramInGuest; the
        # task is reported changed because a process was started in the guest.
        msg = execute_command(content, vm, p['vm_username'], p['vm_password'],
                              p['vm_shell'], p['vm_shell_args'], p['vm_shell_env'], p['vm_shell_cwd'])

        module.exit_json(changed=True, uuid=vm.summary.config.uuid, msg=msg)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(changed=False, msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(changed=False, msg=method_fault.msg)
    except Exception as e:
        module.fail_json(changed=False, msg=str(e))
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_vm_vss_dvs_migrate.py b/lib/ansible/modules/cloud/vmware/vmware_vm_vss_dvs_migrate.py
new file mode 100644
index 0000000000..594a9e1783
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_vm_vss_dvs_migrate.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_vm_vss_dvs_migrate
+short_description: Migrates a virtual machine from a standard vswitch to distributed
+description:
+ - Migrates a virtual machine from a standard vswitch to distributed
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ vm_name:
+ description:
+ - Name of the virtual machine to migrate to a dvSwitch
+ required: True
+ dvportgroup_name:
+ description:
+      - Name of the portgroup to migrate the virtual machine to
+ required: True
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+- name: Migrate VCSA to vDS
+ local_action:
+ module: vmware_vm_vss_dvs_migrate
+ hostname: vcenter_ip_or_hostname
+ username: vcenter_username
+ password: vcenter_password
+ vm_name: virtual_machine_name
+ dvportgroup_name: distributed_portgroup_name
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
class VMwareVmVssDvsMigrate(object):
    """Migrate every NIC of a named VM from a standard vSwitch to a dvSwitch portgroup."""

    def __init__(self, module):
        self.module = module
        self.content = connect_to_api(module)
        self.vm = None
        self.vm_name = module.params['vm_name']
        self.dvportgroup_name = module.params['dvportgroup_name']

    def process_state(self):
        """Dispatch on the VM's current NIC backing state."""
        # 'absent': no NIC is dvSwitch-backed yet -> migrate.
        # 'present': the VM already has a dvSwitch-backed NIC -> no-op.
        vm_nic_states = {
            'absent': self.migrate_network_adapter_vds,
            'present': self.state_exit_unchanged,
        }

        vm_nic_states[self.check_vm_network_state()]()

    def find_dvspg_by_name(self):
        """Return the distributed portgroup named dvportgroup_name, or None."""
        vmware_distributed_port_group = get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
        for dvspg in vmware_distributed_port_group:
            if dvspg.name == self.dvportgroup_name:
                return dvspg
        return None

    def find_vm_by_name(self):
        """Return the VirtualMachine named vm_name, or None."""
        virtual_machines = get_all_objs(self.content, [vim.VirtualMachine])
        for vm in virtual_machines:
            if vm.name == self.vm_name:
                return vm
        return None

    def migrate_network_adapter_vds(self):
        """Re-back every ethernet adapter of the VM onto the distributed portgroup."""
        pg = self.find_dvspg_by_name()

        if pg is None:
            # Bug fix: the previous message said "standard portgroup" even
            # though the lookup is for the *distributed* portgroup.
            self.module.fail_json(msg="The distributed portgroup was not found")

        dvswitch = pg.config.distributedVirtualSwitch
        port = vim.dvs.PortConnection()
        port.switchUuid = dvswitch.uuid
        port.portgroupKey = pg.key
        nic_backing = vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
        nic_backing.port = port

        vm_configspec = vim.vm.ConfigSpec()
        for device in self.vm.config.hardware.device:
            if isinstance(device, vim.vm.device.VirtualEthernetCard):
                # Bug fix: a single VirtualDeviceSpec used to be created once
                # and appended for every NIC, so on multi-NIC VMs the change
                # list held N references to one spec pointing at the last NIC.
                devicespec = vim.vm.device.VirtualDeviceSpec()
                devicespec.device = device
                devicespec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
                devicespec.device.backing = nic_backing
                vm_configspec.deviceChange.append(devicespec)

        task = self.vm.ReconfigVM_Task(vm_configspec)
        changed, result = wait_for_task(task)
        self.module.exit_json(changed=changed, result=result)

    def state_exit_unchanged(self):
        """Report that no reconfiguration was necessary."""
        self.module.exit_json(changed=False)

    def check_vm_network_state(self):
        """Return 'present' if any NIC already has a dvSwitch backing, else 'absent'."""
        try:
            self.vm = self.find_vm_by_name()

            if self.vm is None:
                self.module.fail_json(msg="A virtual machine with name %s does not exist" % self.vm_name)
            for device in self.vm.config.hardware.device:
                if isinstance(device, vim.vm.device.VirtualEthernetCard):
                    if isinstance(device.backing, vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo):
                        return 'present'
            return 'absent'
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
+
+
def main():
    """Entry point for the vmware_vm_vss_dvs_migrate module."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(vm_name=dict(required=True, type='str'),
                              dvportgroup_name=dict(required=True, type='str')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    VMwareVmVssDvsMigrate(module).process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main() \ No newline at end of file
diff --git a/lib/ansible/modules/cloud/vmware/vmware_vmkernel.py b/lib/ansible/modules/cloud/vmware/vmware_vmkernel.py
new file mode 100644
index 0000000000..238b85ea34
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_vmkernel.py
@@ -0,0 +1,212 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_vmkernel
+short_description: Create a VMware VMkernel Interface
+description:
+ - Create a VMware VMkernel Interface
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ vswitch_name:
+ description:
+ - The name of the vswitch where to add the VMK interface
+ required: True
+ portgroup_name:
+ description:
+ - The name of the portgroup for the VMK interface
+ required: True
+ ip_address:
+ description:
+ - The IP Address for the VMK interface
+ required: True
+ subnet_mask:
+ description:
+ - The Subnet Mask for the VMK interface
+ required: True
+  vlan_id:
+ description:
+ - The VLAN ID for the VMK interface
+ required: True
+ mtu:
+ description:
+ - The MTU for the VMK interface
+ required: False
+ enable_vsan:
+ description:
+ - Enable the VMK interface for VSAN traffic
+ required: False
+ enable_vmotion:
+ description:
+ - Enable the VMK interface for vMotion traffic
+ required: False
+ enable_mgmt:
+ description:
+ - Enable the VMK interface for Management traffic
+ required: False
+ enable_ft:
+ description:
+ - Enable the VMK interface for Fault Tolerance traffic
+ required: False
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example command from Ansible Playbook
+
+- name: Add Management vmkernel port (vmk1)
+ local_action:
+ module: vmware_vmkernel
+ hostname: esxi_hostname
+ username: esxi_username
+ password: esxi_password
+ vswitch_name: vswitch_name
+ portgroup_name: portgroup_name
+ vlan_id: vlan_id
+ ip_address: ip_address
+ subnet_mask: subnet_mask
+ enable_mgmt: True
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
def create_vmkernel_adapter(host_system, port_group_name,
                            vlan_id, vswitch_name,
                            ip_address, subnet_mask,
                            mtu, enable_vsan, enable_vmotion, enable_mgmt, enable_ft):
    """Create a portgroup plus a static-IP vmkernel NIC on host_system, then
    tag the new vnic(s) for the requested traffic types. Always returns True;
    UpdateNetworkConfig raises on failure.
    """
    host_config_manager = host_system.configManager
    host_network_system = host_config_manager.networkSystem
    host_virtual_vic_manager = host_config_manager.virtualNicManager
    config = vim.host.NetworkConfig()

    # Portgroup that will host the new vmkernel interface (default policy).
    config.portgroup = [vim.host.PortGroup.Config()]
    config.portgroup[0].changeOperation = "add"
    config.portgroup[0].spec = vim.host.PortGroup.Specification()
    config.portgroup[0].spec.name = port_group_name
    config.portgroup[0].spec.vlanId = vlan_id
    config.portgroup[0].spec.vswitchName = vswitch_name
    config.portgroup[0].spec.policy = vim.host.NetworkPolicy()

    # vmkernel NIC with a static IPv4 configuration (DHCP explicitly off).
    config.vnic = [vim.host.VirtualNic.Config()]
    config.vnic[0].changeOperation = "add"
    config.vnic[0].portgroup = port_group_name
    config.vnic[0].spec = vim.host.VirtualNic.Specification()
    config.vnic[0].spec.ip = vim.host.IpConfig()
    config.vnic[0].spec.ip.dhcp = False
    config.vnic[0].spec.ip.ipAddress = ip_address
    config.vnic[0].spec.ip.subnetMask = subnet_mask
    if mtu:
        config.vnic[0].spec.mtu = mtu

    host_network_config_result = host_network_system.UpdateNetworkConfig(config, "modify")

    # Enable the requested traffic types on every vnic the call created
    # (normally exactly one).
    for vnic_device in host_network_config_result.vnicDevice:
        if enable_vsan:
            vsan_system = host_config_manager.vsanSystem
            vsan_config = vim.vsan.host.ConfigInfo()
            vsan_config.networkInfo = vim.vsan.host.ConfigInfo.NetworkInfo()

            vsan_config.networkInfo.port = [vim.vsan.host.ConfigInfo.NetworkInfo.PortConfig()]

            vsan_config.networkInfo.port[0].device = vnic_device
            # NOTE(review): UpdateVsan_Task returns a task object that is
            # never awaited, so VSAN enablement may still be in flight when
            # this function returns -- confirm whether waiting is required.
            host_vsan_config_result = vsan_system.UpdateVsan_Task(vsan_config)

        if enable_vmotion:
            host_virtual_vic_manager.SelectVnicForNicType("vmotion", vnic_device)

        if enable_mgmt:
            host_virtual_vic_manager.SelectVnicForNicType("management", vnic_device)

        if enable_ft:
            host_virtual_vic_manager.SelectVnicForNicType("faultToleranceLogging", vnic_device)
    return True
+
+
def main():
    """Entry point: create and tag a vmkernel adapter on the first host found."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(portgroup_name=dict(required=True, type='str'),
                              ip_address=dict(required=True, type='str'),
                              subnet_mask=dict(required=True, type='str'),
                              mtu=dict(required=False, type='int'),
                              enable_vsan=dict(required=False, type='bool'),
                              enable_vmotion=dict(required=False, type='bool'),
                              enable_mgmt=dict(required=False, type='bool'),
                              enable_ft=dict(required=False, type='bool'),
                              vswitch_name=dict(required=True, type='str'),
                              vlan_id=dict(required=True, type='int')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    port_group_name = module.params['portgroup_name']
    ip_address = module.params['ip_address']
    subnet_mask = module.params['subnet_mask']
    mtu = module.params['mtu']
    enable_vsan = module.params['enable_vsan']
    enable_vmotion = module.params['enable_vmotion']
    enable_mgmt = module.params['enable_mgmt']
    enable_ft = module.params['enable_ft']
    vswitch_name = module.params['vswitch_name']
    vlan_id = module.params['vlan_id']

    try:
        content = connect_to_api(module)
        host = get_all_objs(content, [vim.HostSystem])
        if not host:
            module.fail_json(msg="Unable to locate Physical Host.")
        # Bug fix: dict.keys() is a non-subscriptable view on Python 3;
        # materialize it before taking the first host.
        host_system = list(host.keys())[0]
        changed = create_vmkernel_adapter(host_system, port_group_name,
                                          vlan_id, vswitch_name,
                                          ip_address, subnet_mask,
                                          mtu, enable_vsan, enable_vmotion, enable_mgmt, enable_ft)
        module.exit_json(changed=changed)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
    except Exception as e:
        module.fail_json(msg=str(e))
+
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_vmkernel_ip_config.py b/lib/ansible/modules/cloud/vmware/vmware_vmkernel_ip_config.py
new file mode 100644
index 0000000000..fe545e356d
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_vmkernel_ip_config.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_vmkernel_ip_config
+short_description: Configure the VMkernel IP Address
+description:
+ - Configure the VMkernel IP Address
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ vmk_name:
+ description:
+ - VMkernel interface name
+ required: True
+ ip_address:
+ description:
+ - IP address to assign to VMkernel interface
+ required: True
+ subnet_mask:
+ description:
+ - Subnet Mask to assign to VMkernel interface
+ required: True
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example command from Ansible Playbook
+
+- name: Configure IP address on ESX host
+ local_action:
+ module: vmware_vmkernel_ip_config
+ hostname: esxi_hostname
+ username: esxi_username
+ password: esxi_password
+ vmk_name: vmk0
+ ip_address: 10.0.0.10
+ subnet_mask: 255.255.255.0
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
def configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask):
    """Set a static IP address and netmask on the vmk interface named vmk_name.

    Returns True when the host was reconfigured, False when the interface
    already had the requested address and mask or no such interface exists.
    """
    host_config_manager = host_system.configManager
    host_network_system = host_config_manager.networkSystem

    for vnic in host_network_system.networkConfig.vnic:
        if vnic.device == vmk_name:
            spec = vnic.spec
            # Bug fix: the old idempotency check compared only the IP
            # address, so a change to the subnet mask alone was silently
            # ignored. Reconfigure when either value differs.
            if spec.ip.ipAddress != ip_address or spec.ip.subnetMask != subnet_mask:
                spec.ip.dhcp = False
                spec.ip.ipAddress = ip_address
                spec.ip.subnetMask = subnet_mask
                host_network_system.UpdateVirtualNic(vmk_name, spec)
                return True
    return False
+
+
def main():
    """Entry point: apply the requested vmk IP configuration to the first host found."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(vmk_name=dict(required=True, type='str'),
                              ip_address=dict(required=True, type='str'),
                              subnet_mask=dict(required=True, type='str')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    vmk_name = module.params['vmk_name']
    ip_address = module.params['ip_address']
    subnet_mask = module.params['subnet_mask']

    try:
        content = connect_to_api(module, False)
        host = get_all_objs(content, [vim.HostSystem])
        if not host:
            module.fail_json(msg="Unable to locate Physical Host.")
        # Bug fix: dict.keys() is a non-subscriptable view on Python 3;
        # materialize it before taking the first host.
        host_system = list(host.keys())[0]
        changed = configure_vmkernel_ip_address(host_system, vmk_name, ip_address, subnet_mask)
        module.exit_json(changed=changed)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
    except Exception as e:
        module.fail_json(msg=str(e))
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_vmotion.py b/lib/ansible/modules/cloud/vmware/vmware_vmotion.py
new file mode 100644
index 0000000000..0ceaf59787
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_vmotion.py
@@ -0,0 +1,154 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Bede Carroll <bc+github () bedecarroll.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_vmotion
+short_description: Move a virtual machine using vMotion
+description:
+ - Using VMware vCenter, move a virtual machine using vMotion to a different
+ host.
+version_added: 2.2
+author: "Bede Carroll (@bedecarroll)"
+notes:
+ - Tested on vSphere 6.0
+requirements:
+ - "python >= 2.6"
+ - pyVmomi
+options:
+ vm_name:
+ description:
+ - Name of the VM to perform a vMotion on
+ required: True
+ aliases: ['vm']
+ destination_host:
+ description:
+ - Name of the end host the VM should be running on
+ required: True
+ aliases: ['destination']
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+Example from Ansible playbook
+
+ - name: Perform vMotion of VM
+ local_action:
+ module: vmware_vmotion
+ hostname: 'vcenter_hostname'
+ username: 'vcenter_username'
+ password: 'vcenter_password'
+ validate_certs: False
+ vm_name: 'vm_name_as_per_vcenter'
+ destination_host: 'destination_host_as_per_vcenter'
+'''
+
+RETURN = '''
+running_host:
+ description: List the host the virtual machine is registered to
+ returned:
+ - changed
+ - success
+ type: string
+ sample: 'host1.example.com'
+'''
+
+try:
+ from pyVmomi import vim
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
def migrate_vm(vm_object, host_object):
    """Start relocating *vm_object* onto *host_object*.

    Returns the vCenter task object tracking the relocation.
    """
    spec = vim.vm.RelocateSpec(host=host_object)
    return vm_object.Relocate(spec)
+
def main():
    """Ansible entry point: vMotion a VM to the requested destination host."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            vm_name=dict(required=True, aliases=['vm'], type='str'),
            destination_host=dict(required=True, aliases=['destination'], type='str'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyVmomi is required for this module')

    vm_name = module.params['vm_name']
    destination_host = module.params['destination_host']

    content = connect_to_api(module=module)
    vm_object = find_vm_by_name(content=content, vm_name=vm_name)
    host_object = find_hostsystem_by_name(content=content, hostname=destination_host)

    result = {'changed': False}

    # Guard clauses: bail out early if either lookup failed.
    if not vm_object:
        module.fail_json(msg='Cannot find virtual machine')
    if not host_object:
        module.fail_json(msg='Cannot find host')

    # Nothing to do when the VM already runs on the requested host.
    if vm_object.runtime.host.name == destination_host:
        module.exit_json(**result)

    if module.check_mode:
        # Dry run: report the result as if the move had been performed.
        result['running_host'] = destination_host
        result['changed'] = True
        module.exit_json(**result)

    # Kick off the migration and block until the task finishes.
    task_object = migrate_vm(vm_object=vm_object, host_object=host_object)
    wait_for_task(task_object)

    if task_object.info.state == vim.TaskInfo.State.success:
        # Re-read the VM so running_host reflects the post-move placement.
        vm_object = find_vm_by_name(content=content, vm_name=vm_name)
        result['running_host'] = vm_object.runtime.host.name
        result['changed'] = True
        module.exit_json(**result)

    if task_object.info.error is None:
        module.fail_json(msg='Unable to migrate VM due to an error, please check vCenter')
    else:
        module.fail_json(msg='Unable to migrate VM due to an error: %s' % task_object.info.error)
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.vmware import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_vsan_cluster.py b/lib/ansible/modules/cloud/vmware/vmware_vsan_cluster.py
new file mode 100644
index 0000000000..714f6f22ff
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_vsan_cluster.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Russell Teague <rteague2 () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_vsan_cluster
+short_description: Configure VSAN clustering on an ESXi host
+description:
+ - This module can be used to configure VSAN clustering on an ESXi host
+version_added: 2.0
+author: "Russell Teague (@mtnbikenc)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ cluster_uuid:
+ description:
+ - Desired cluster UUID
+ required: False
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+# Example command from Ansible Playbook
+
+- name: Configure VMware VSAN Cluster
+ hosts: deploy_node
+ gather_facts: False
+ tags:
+ - vsan
+ tasks:
+ - name: Configure VSAN on first host
+ vmware_vsan_cluster:
+ hostname: "{{ groups['esxi'][0] }}"
+ username: "{{ esxi_username }}"
+ password: "{{ site_password }}"
+ register: vsan_cluster
+
+ - name: Configure VSAN on remaining hosts
+ vmware_vsan_cluster:
+ hostname: "{{ item }}"
+ username: "{{ esxi_username }}"
+ password: "{{ site_password }}"
+ cluster_uuid: "{{ vsan_cluster.cluster_uuid }}"
+ with_items: "{{ groups['esxi'][1:] }}"
+
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
def create_vsan_cluster(host_system, new_cluster_uuid):
    """Enable VSAN (with storage auto-claim) on *host_system*.

    When *new_cluster_uuid* is given, the host joins that cluster;
    otherwise the host's own cluster membership is left to vSphere.

    Returns a (changed, task_result, cluster_uuid) tuple, where
    cluster_uuid is re-read from the host after the update.
    """
    vsan_system = host_system.configManager.vsanSystem

    vsan_config = vim.vsan.host.ConfigInfo()
    vsan_config.enabled = True

    if new_cluster_uuid is not None:
        cluster_info = vim.vsan.host.ConfigInfo.ClusterInfo()
        cluster_info.uuid = new_cluster_uuid
        vsan_config.clusterInfo = cluster_info

    storage_info = vim.vsan.host.ConfigInfo.StorageInfo()
    storage_info.autoClaimStorage = True
    vsan_config.storageInfo = storage_info

    changed, result = wait_for_task(vsan_system.UpdateVsan_Task(vsan_config))

    return changed, result, vsan_system.QueryHostStatus().uuid
+
+
def main():
    """Ansible entry point: configure VSAN clustering on an ESXi host."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(cluster_uuid=dict(required=False, type='str')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    new_cluster_uuid = module.params['cluster_uuid']

    try:
        content = connect_to_api(module, False)
        host = get_all_objs(content, [vim.HostSystem])
        if not host:
            module.fail_json(msg="Unable to locate Physical Host.")
        # Bug fix: get_all_objs returns a dict and dict.keys()[0] raises
        # TypeError on Python 3; take the first key via an iterator instead.
        host_system = next(iter(host))
        changed, result, cluster_uuid = create_vsan_cluster(host_system, new_cluster_uuid)
        module.exit_json(changed=changed, result=result, cluster_uuid=cluster_uuid)

    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
    except Exception as e:
        module.fail_json(msg=str(e))
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vmware_vswitch.py b/lib/ansible/modules/cloud/vmware/vmware_vswitch.py
new file mode 100644
index 0000000000..ef14f2d6bf
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vmware_vswitch.py
@@ -0,0 +1,203 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2015, Joseph Callen <jcallen () csc.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vmware_vswitch
+short_description: Add a VMware Standard Switch to an ESXi host
+description:
+ - Add a VMware Standard Switch to an ESXi host
+version_added: 2.0
+author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
+notes:
+ - Tested on vSphere 5.5
+requirements:
+ - "python >= 2.6"
+ - PyVmomi
+options:
+ switch_name:
+ description:
+ - vSwitch name to add
+ required: True
+ nic_name:
+ description:
+ - vmnic name to attach to vswitch
+ required: True
+ number_of_ports:
+ description:
+ - Number of port to configure on vswitch
+ default: 128
+ required: False
+ mtu:
+ description:
+ - MTU to configure on vswitch
+ required: False
+ state:
+ description:
+ - Add or remove the switch
+ default: 'present'
+ choices:
+ - 'present'
+ - 'absent'
+ required: False
+extends_documentation_fragment: vmware.documentation
+'''
+
+EXAMPLES = '''
+Example from Ansible playbook
+
+ - name: Add a VMware vSwitch
+ local_action:
+ module: vmware_vswitch
+ hostname: esxi_hostname
+ username: esxi_username
+ password: esxi_password
+ switch_name: vswitch_name
+ nic_name: vmnic_name
+ mtu: 9000
+'''
+
+try:
+ from pyVmomi import vim, vmodl
+ HAS_PYVMOMI = True
+except ImportError:
+ HAS_PYVMOMI = False
+
+
def find_vswitch_by_name(host, vswitch_name):
    """Return the host's standard vSwitch named *vswitch_name*, or None."""
    return next(
        (vss for vss in host.config.network.vswitch if vss.name == vswitch_name),
        None,
    )
+
+
class VMwareHostVirtualSwitch(object):
    """Create or remove a standard vSwitch on a single ESXi host.

    State handlers call module.exit_json/fail_json directly, so each
    public method terminates the Ansible module run.
    """

    def __init__(self, module):
        self.host_system = None
        self.content = None
        self.vss = None
        self.module = module
        self.switch_name = module.params['switch_name']
        self.number_of_ports = module.params['number_of_ports']
        self.nic_name = module.params['nic_name']
        self.mtu = module.params['mtu']
        self.state = module.params['state']
        self.content = connect_to_api(self.module)

    def process_state(self):
        """Dispatch to the handler for (desired state, current state)."""
        try:
            vswitch_states = {
                'absent': {
                    'present': self.state_destroy_vswitch,
                    'absent': self.state_exit_unchanged,
                },
                'present': {
                    'update': self.state_update_vswitch,
                    'present': self.state_exit_unchanged,
                    'absent': self.state_create_vswitch,
                }
            }

            vswitch_states[self.state][self.check_vswitch_configuration()]()

        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            self.module.fail_json(msg=str(e))


    # Source from
    # https://github.com/rreubenur/pyvmomi-community-samples/blob/patch-1/samples/create_vswitch.py

    def state_create_vswitch(self):
        """Create the vSwitch bonded to the requested NIC and exit changed."""
        vss_spec = vim.host.VirtualSwitch.Specification()
        vss_spec.numPorts = self.number_of_ports
        vss_spec.mtu = self.mtu
        vss_spec.bridge = vim.host.VirtualSwitch.BondBridge(nicDevice=[self.nic_name])
        self.host_system.configManager.networkSystem.AddVirtualSwitch(vswitchName=self.switch_name, spec=vss_spec)
        self.module.exit_json(changed=True)

    def state_exit_unchanged(self):
        """Desired state already holds; report no change."""
        self.module.exit_json(changed=False)

    def state_destroy_vswitch(self):
        """Remove the vSwitch and every port group attached to it."""
        config = vim.host.NetworkConfig()

        for portgroup in self.host_system.configManager.networkSystem.networkInfo.portgroup:
            if portgroup.spec.vswitchName == self.vss.name:
                portgroup_config = vim.host.PortGroup.Config()
                portgroup_config.changeOperation = "remove"
                portgroup_config.spec = vim.host.PortGroup.Specification()
                # Bug fix: spec.name was assigned twice in the original;
                # one copy of each field is all that is needed.
                portgroup_config.spec.name = portgroup.spec.name
                portgroup_config.spec.vlanId = portgroup.spec.vlanId
                portgroup_config.spec.vswitchName = portgroup.spec.vswitchName
                portgroup_config.spec.policy = vim.host.NetworkPolicy()
                config.portgroup.append(portgroup_config)

        self.host_system.configManager.networkSystem.UpdateNetworkConfig(config, "modify")
        self.host_system.configManager.networkSystem.RemoveVirtualSwitch(self.vss.name)
        self.module.exit_json(changed=True)

    def state_update_vswitch(self):
        """Reconfiguring an existing vSwitch is not implemented."""
        self.module.exit_json(changed=False, msg="Currently not implemented.")

    def check_vswitch_configuration(self):
        """Return 'present'/'absent' for the switch and cache host/vss refs."""
        host = get_all_objs(self.content, [vim.HostSystem])
        if not host:
            self.module.fail_json(msg="Unable to find host")

        # Bug fix: get_all_objs returns a dict and dict.keys()[0] raises
        # TypeError on Python 3; take the first key via an iterator instead.
        self.host_system = next(iter(host))
        self.vss = find_vswitch_by_name(self.host_system, self.switch_name)

        if self.vss is None:
            return 'absent'
        else:
            return 'present'
+
+
def main():
    """Ansible entry point: add or remove a standard vSwitch on an ESXi host."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(
        dict(
            switch_name=dict(required=True, type='str'),
            nic_name=dict(required=True, type='str'),
            number_of_ports=dict(required=False, type='int', default=128),
            mtu=dict(required=False, type='int', default=1500),
            state=dict(default='present', choices=['present', 'absent'], type='str'),
        )
    )

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    # process_state() terminates the run via exit_json/fail_json.
    VMwareHostVirtualSwitch(module).process_state()
+
+from ansible.module_utils.vmware import *
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/vmware/vsphere_copy.py b/lib/ansible/modules/cloud/vmware/vsphere_copy.py
new file mode 100644
index 0000000000..7e2ef125c8
--- /dev/null
+++ b/lib/ansible/modules/cloud/vmware/vsphere_copy.py
@@ -0,0 +1,195 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2015 Dag Wieers <dag@wieers.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: vsphere_copy
+short_description: Copy a file to a vCenter datastore
+description:
+ - Upload files to a vCenter datastore
+version_added: 2.0
+author: Dag Wieers (@dagwieers) <dag@wieers.com>
+options:
+ host:
+ description:
+ - The vCenter server on which the datastore is available.
+ required: true
+ login:
+ description:
+ - The login name to authenticate on the vCenter server.
+ required: true
+ password:
+ description:
+ - The password to authenticate on the vCenter server.
+ required: true
+ src:
+ description:
+ - The file to push to vCenter
+ required: true
+ datacenter:
+ description:
+ - The datacenter on the vCenter server that holds the datastore.
+ required: true
+ datastore:
+ description:
+ - The datastore on the vCenter server to push files to.
+ required: true
+ path:
+ description:
+ - The file to push to the datastore on the vCenter server.
+ required: true
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be
+ set to C(no) when no other option exists.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+
+notes:
+ - "This module ought to be run from a system that can access vCenter directly and has the file to transfer.
+ It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to)."
+ - Tested on vSphere 5.5
+'''
+
+EXAMPLES = '''
+- vsphere_copy:
+ host: vhost
+ login: vuser
+ password: vpass
+ src: /some/local/file
+ datacenter: DC1 Someplace
+ datastore: datastore1
+ path: some/remote/file
+ transport: local
+- vsphere_copy:
+ host: vhost
+ login: vuser
+ password: vpass
+ src: /other/local/file
+ datacenter: DC2 Someplace
+ datastore: datastore2
+ path: other/remote/file
+ delegate_to: other_system
+'''
+
+import atexit
+import urllib
+import mmap
+import errno
+import socket
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.pycompat24 import get_exception
+from ansible.module_utils.urls import open_url
+
def vmware_path(datastore, datacenter, path):
    """Construct a datastore-browser URL path that vSphere accepts reliably.

    Args:
        datastore: datastore name (sent as the dsName query parameter).
        datacenter: datacenter path; empty string omits dcPath entirely.
        path: file path within the datastore; leading slashes are stripped.

    Returns:
        A "/folder/<path>?<query>" string suitable for appending to the host.
    """
    # Bug fix: urllib.urlencode is Python-2-only; import the right
    # function for whichever interpreter is running.
    try:
        from urllib import urlencode  # Python 2
    except ImportError:
        from urllib.parse import urlencode  # Python 3

    path = "/folder/%s" % path.lstrip("/")
    # Due to a software bug in vSphere, it fails to handle ampersand in datacenter names
    # The solution is to do what vSphere does (when browsing) and double-encode ampersands
    # (the '%' below is itself percent-encoded by urlencode, yielding '%2526').
    datacenter = datacenter.replace('&', '%26')
    params = dict(dsName=datastore)
    if datacenter:
        params["dcPath"] = datacenter
    return "%s?%s" % (path, urlencode(params))
+
def main():
    """Ansible entry point: PUT a local file onto a vCenter datastore."""
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(required=True, aliases=['hostname']),
            login=dict(required=True, aliases=['username']),
            password=dict(required=True, no_log=True),
            src=dict(required=True, aliases=['name']),
            datacenter=dict(required=True),
            datastore=dict(required=True),
            dest=dict(required=True, aliases=['path']),
            validate_certs=dict(required=False, default=True, type='bool'),
        ),
        # Implementing check-mode using HEAD is impossible, since size/date is not 100% reliable
        supports_check_mode=False,
    )

    host = module.params.get('host')
    login = module.params.get('login')
    password = module.params.get('password')
    src = module.params.get('src')
    datacenter = module.params.get('datacenter')
    datastore = module.params.get('datastore')
    dest = module.params.get('dest')
    validate_certs = module.params.get('validate_certs')

    fd = open(src, "rb")
    atexit.register(fd.close)

    # mmap the source so one buffer serves both the upload body and Content-Length.
    data = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ)
    atexit.register(data.close)

    remote_path = vmware_path(datastore, datacenter, dest)
    url = 'https://%s%s' % (host, remote_path)

    headers = {
        "Content-Type": "application/octet-stream",
        "Content-Length": str(len(data)),
    }

    try:
        r = open_url(url, data=data, headers=headers, method='PUT',
                     url_username=login, url_password=password, validate_certs=validate_certs,
                     force_basic_auth=True)
    except socket.error:
        e = get_exception()
        # Bug fix: exceptions are not indexable on Python 3 (e[0] raised
        # TypeError); read the errno from e.args instead.
        error_code = e.args[0] if e.args else None
        if error_code == errno.ECONNRESET:
            # VSphere resets connection if the file is in use and cannot be replaced
            module.fail_json(msg='Failed to upload, image probably in use', status=None, errno=error_code, reason=str(e), url=url)
        else:
            module.fail_json(msg=str(e), status=None, errno=error_code, reason=str(e), url=url)
    except Exception:
        e = get_exception()
        error_code = -1
        # Bug fix: the old `e[0]` / `except KeyError` pair could never work on
        # Python 3 (indexing raises TypeError, not KeyError); use e.args.
        if e.args and isinstance(e.args[0], int):
            error_code = e.args[0]
        module.fail_json(msg=str(e), status=None, errno=error_code, reason=str(e), url=url)

    status = r.getcode()
    if 200 <= status < 300:
        module.exit_json(changed=True, status=status, reason=r.msg, url=url)
    else:
        length = r.headers.get('content-length', None)
        if r.headers.get('transfer-encoding', '').lower() == 'chunked':
            chunked = 1
        else:
            chunked = 0

        module.fail_json(msg='Failed to upload', errno=None, status=status, reason=r.msg, length=length, headers=dict(r.headers), chunked=chunked, url=url)
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/webfaction/__init__.py b/lib/ansible/modules/cloud/webfaction/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/ansible/modules/cloud/webfaction/__init__.py
diff --git a/lib/ansible/modules/cloud/webfaction/webfaction_app.py b/lib/ansible/modules/cloud/webfaction/webfaction_app.py
new file mode 100644
index 0000000000..63c00bd177
--- /dev/null
+++ b/lib/ansible/modules/cloud/webfaction/webfaction_app.py
@@ -0,0 +1,204 @@
+#!/usr/bin/python
+#
+# Create a Webfaction application using Ansible and the Webfaction API
+#
+# Valid application types can be found by looking here:
+# http://docs.webfaction.com/xmlrpc-api/apps.html#application-types
+#
+# ------------------------------------------
+#
+# (c) Quentin Stafford-Fraser 2015, with contributions gratefully acknowledged from:
+# * Andy Baker
+# * Federico Tarantini
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: webfaction_app
+short_description: Add or remove applications on a Webfaction host
+description:
+ - Add or remove applications on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+version_added: "2.0"
+notes:
+ - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+ - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+options:
+ name:
+ description:
+ - The name of the application
+ required: true
+
+ state:
+ description:
+ - Whether the application should exist
+ required: false
+ choices: ['present', 'absent']
+ default: "present"
+
+ type:
+ description:
+ - The type of application to create. See the Webfaction docs at http://docs.webfaction.com/xmlrpc-api/apps.html for a list.
+ required: true
+
+ autostart:
+ description:
+ - Whether the app should restart with an autostart.cgi script
+ required: false
+ default: "no"
+
+ extra_info:
+ description:
+ - Any extra parameters required by the app
+ required: false
+ default: null
+
+ port_open:
+ description:
+        - If the port should be opened
+ required: false
+ default: false
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+
+ machine:
+ description:
+ - The machine name to use (optional for accounts with only one machine)
+ required: false
+
+'''
+
+EXAMPLES = '''
+ - name: Create a test app
+ webfaction_app:
+ name="my_wsgi_app1"
+ state=present
+ type=mod_wsgi35-python27
+ login_name={{webfaction_user}}
+ login_password={{webfaction_passwd}}
+ machine={{webfaction_machine}}
+'''
+
+import xmlrpclib
+
+webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
+
def main():
    """Ansible entry point: ensure a Webfaction application is present/absent.

    Talks to the Webfaction XML-RPC API through the module-level
    ``webfaction`` proxy; supports check mode.
    """

    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True),
            state = dict(required=False, choices=['present', 'absent'], default='present'),
            type = dict(required=True),
            autostart = dict(required=False, type='bool', default=False),
            extra_info = dict(required=False, default=""),
            port_open = dict(required=False, type='bool', default=False),
            login_name = dict(required=True),
            login_password = dict(required=True),
            # NOTE(review): default=False (not None) looks odd for a string
            # option; the truthiness test below only cares that it is falsy.
            machine = dict(required=False, default=False),
        ),
        supports_check_mode=True
    )
    app_name = module.params['name']
    app_type = module.params['type']
    app_state = module.params['state']

    # Log in, targeting a specific machine only when one was supplied.
    if module.params['machine']:
        session_id, account = webfaction.login(
            module.params['login_name'],
            module.params['login_password'],
            module.params['machine']
        )
    else:
        session_id, account = webfaction.login(
            module.params['login_name'],
            module.params['login_password']
        )

    # Map existing app names to their records for a quick existence lookup.
    app_list = webfaction.list_apps(session_id)
    app_map = dict([(i['name'], i) for i in app_list])
    existing_app = app_map.get(app_name)

    result = {}

    # Here's where the real stuff happens

    if app_state == 'present':

        # Does an app with this name already exist?
        if existing_app:
            if existing_app['type'] != app_type:
                module.fail_json(msg="App already exists with different type. Please fix by hand.")

            # If it exists with the right type, we don't change it
            # Should check other parameters.
            module.exit_json(
                changed = False,
            )

        if not module.check_mode:
            # If this isn't a dry run, create the app
            result.update(
                webfaction.create_app(
                    session_id, app_name, app_type,
                    module.boolean(module.params['autostart']),
                    module.params['extra_info'],
                    module.boolean(module.params['port_open'])
                )
            )

    elif app_state == 'absent':

        # If the app's already not there, nothing changed.
        if not existing_app:
            module.exit_json(
                changed = False,
            )

        if not module.check_mode:
            # If this isn't a dry run, delete the app
            result.update(
                webfaction.delete_app(session_id, app_name)
            )

    else:
        module.fail_json(msg="Unknown state specified: {}".format(app_state))


    # Reaching here means a create/delete happened (or would have, in check mode).
    module.exit_json(
        changed = True,
        result = result
    )
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/webfaction/webfaction_db.py b/lib/ansible/modules/cloud/webfaction/webfaction_db.py
new file mode 100644
index 0000000000..6fe785f76a
--- /dev/null
+++ b/lib/ansible/modules/cloud/webfaction/webfaction_db.py
@@ -0,0 +1,205 @@
+#!/usr/bin/python
+#
+# Create a webfaction database using Ansible and the Webfaction API
+#
+# ------------------------------------------
+#
+# (c) Quentin Stafford-Fraser 2015, with contributions gratefully acknowledged from:
+# * Andy Baker
+# * Federico Tarantini
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: webfaction_db
+short_description: Add or remove a database on Webfaction
+description:
+ - Add or remove a database on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+version_added: "2.0"
+notes:
+ - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+ - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+options:
+
+ name:
+ description:
+ - The name of the database
+ required: true
+
+ state:
+ description:
+ - Whether the database should exist
+ required: false
+ choices: ['present', 'absent']
+ default: "present"
+
+ type:
+ description:
+ - The type of database to create.
+ required: true
+ choices: ['mysql', 'postgresql']
+
+ password:
+ description:
+ - The password for the new database user.
+ required: false
+ default: None
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+
+ machine:
+ description:
+ - The machine name to use (optional for accounts with only one machine)
+ required: false
+'''
+
+EXAMPLES = '''
+ # This will also create a default DB user with the same
+ # name as the database, and the specified password.
+
+ - name: Create a database
+ webfaction_db:
+ name: "{{webfaction_user}}_db1"
+ password: mytestsql
+ type: mysql
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+ machine: "{{webfaction_machine}}"
+
+ # Note that, for symmetry's sake, deleting a database using
+ # 'state: absent' will also delete the matching user.
+
+'''
+
+import socket
+import xmlrpclib
+
+webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
+
def main():
    """Ansible entry point: ensure a Webfaction database (and its matching
    default user) is present or absent.

    Talks to the Webfaction XML-RPC API through the module-level
    ``webfaction`` proxy; supports check mode.
    """
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            type=dict(required=True),
            password=dict(required=False, default=None),
            login_name=dict(required=True),
            login_password=dict(required=True),
            machine=dict(required=False, default=False),
        ),
        supports_check_mode=True
    )
    db_name = module.params['name']
    db_state = module.params['state']
    db_type = module.params['type']
    db_passwd = module.params['password']

    # Log in, targeting a specific machine only when one was supplied.
    if module.params['machine']:
        session_id, account = webfaction.login(
            module.params['login_name'],
            module.params['login_password'],
            module.params['machine']
        )
    else:
        session_id, account = webfaction.login(
            module.params['login_name'],
            module.params['login_password']
        )

    db_list = webfaction.list_dbs(session_id)
    db_map = dict([(i['name'], i) for i in db_list])
    existing_db = db_map.get(db_name)

    # Webfaction creates a default DB user named after the database,
    # so look that up under the same name.
    user_list = webfaction.list_db_users(session_id)
    user_map = dict([(i['username'], i) for i in user_list])
    existing_user = user_map.get(db_name)

    result = {}

    # Here's where the real stuff happens

    if db_state == 'present':

        # Does a database with this name already exist?
        if existing_db:
            # Yes, but of a different type - fail
            if existing_db['db_type'] != db_type:
                module.fail_json(msg="Database already exists but is a different type. Please fix by hand.")

            # If it exists with the right type, we don't change anything.
            module.exit_json(
                changed=False,
            )

        if not module.check_mode:
            # If this isn't a dry run, create the db and default user.
            result.update(
                webfaction.create_db(
                    session_id, db_name, db_type, db_passwd
                )
            )

    elif db_state == 'absent':

        # Bug fix: this existence check used to sit inside the
        # "not check_mode" branch, so a dry run against a missing database
        # fell through and incorrectly reported changed=True.
        if not (existing_db or existing_user):
            module.exit_json(changed=False)

        if not module.check_mode:

            if existing_db:
                # Delete the db if it exists
                result.update(
                    webfaction.delete_db(session_id, db_name, db_type)
                )

            if existing_user:
                # Delete the default db user if it exists
                result.update(
                    webfaction.delete_db_user(session_id, db_name, db_type)
                )

    else:
        module.fail_json(msg="Unknown state specified: {}".format(db_state))

    module.exit_json(
        changed=True,
        result=result
    )
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/webfaction/webfaction_domain.py b/lib/ansible/modules/cloud/webfaction/webfaction_domain.py
new file mode 100644
index 0000000000..859209c9ce
--- /dev/null
+++ b/lib/ansible/modules/cloud/webfaction/webfaction_domain.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+#
+# Create Webfaction domains and subdomains using Ansible and the Webfaction API
+#
+# ------------------------------------------
+#
+# (c) Quentin Stafford-Fraser 2015
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: webfaction_domain
+short_description: Add or remove domains and subdomains on Webfaction
+description:
+ - Add or remove domains or subdomains on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+version_added: "2.0"
+notes:
+ - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. If you don't specify subdomains, the domain will be deleted.
+ - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+ - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+options:
+
+ name:
+ description:
+ - The name of the domain
+ required: true
+
+ state:
+ description:
+ - Whether the domain should exist
+ required: false
+ choices: ['present', 'absent']
+ default: "present"
+
+ subdomains:
+ description:
+ - Any subdomains to create.
+ required: false
+ default: null
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: Create a test domain
+ webfaction_domain:
+ name: mydomain.com
+ state: present
+ subdomains:
+ - www
+ - blog
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+
+ - name: Delete test domain and any subdomains
+ webfaction_domain:
+ name: mydomain.com
+ state: absent
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+
+'''
+
+import socket
+import xmlrpclib
+
+webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
+
def main():
    """Ensure the requested domain (and any subdomains) is present or absent."""

    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            # type='list' lets Ansible coerce a comma-separated string as well,
            # matching how webfaction_site declares its list options.
            subdomains=dict(required=False, type='list', default=[]),
            login_name=dict(required=True),
            # no_log keeps the password out of syslog / -v output.
            login_password=dict(required=True, no_log=True),
        ),
        supports_check_mode=True
    )
    domain_name = module.params['name']
    domain_state = module.params['state']
    domain_subdomains = module.params['subdomains']

    session_id, account = webfaction.login(
        module.params['login_name'],
        module.params['login_password']
    )

    domain_list = webfaction.list_domains(session_id)
    domain_map = dict([(i['domain'], i) for i in domain_list])
    existing_domain = domain_map.get(domain_name)

    result = {}

    # Here's where the real stuff happens

    if domain_state == 'present':

        # Does a domain with this name already exist?
        if existing_domain:

            if set(existing_domain['subdomains']) >= set(domain_subdomains):
                # It already has (at least) the requested subdomains - nothing to do.
                module.exit_json(
                    changed=False,
                )

        positional_args = [session_id, domain_name] + domain_subdomains

        if not module.check_mode:
            # If this isn't a dry run, create the domain
            result.update(
                webfaction.create_domain(
                    *positional_args
                )
            )

    elif domain_state == 'absent':

        # If the domain's already not there, nothing changed.
        if not existing_domain:
            module.exit_json(
                changed=False,
            )

        positional_args = [session_id, domain_name] + domain_subdomains

        if not module.check_mode:
            # If this isn't a dry run, delete the domain (or, if subdomains
            # were given, just those subdomains - see the module notes).
            result.update(
                webfaction.delete_domain(*positional_args)
            )

    else:
        module.fail_json(msg="Unknown state specified: {}".format(domain_state))

    module.exit_json(
        changed=True,
        result=result
    )
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/webfaction/webfaction_mailbox.py b/lib/ansible/modules/cloud/webfaction/webfaction_mailbox.py
new file mode 100644
index 0000000000..2132eeaffb
--- /dev/null
+++ b/lib/ansible/modules/cloud/webfaction/webfaction_mailbox.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+#
+# Create webfaction mailbox using Ansible and the Webfaction API
+#
+# ------------------------------------------
+# (c) Quentin Stafford-Fraser and Andy Baker 2015
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: webfaction_mailbox
+short_description: Add or remove mailboxes on Webfaction
+description:
+ - Add or remove mailboxes on a Webfaction account. Further documentation at http://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+version_added: "2.0"
+notes:
+ - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+ - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+options:
+
+ mailbox_name:
+ description:
+ - The name of the mailbox
+ required: true
+
+ mailbox_password:
+ description:
+ - The password for the mailbox
+    required: true
+
+ state:
+ description:
+ - Whether the mailbox should exist
+ required: false
+ choices: ['present', 'absent']
+ default: "present"
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: Create a mailbox
+ webfaction_mailbox:
+ mailbox_name="mybox"
+ mailbox_password="myboxpw"
+ state=present
+ login_name={{webfaction_user}}
+ login_password={{webfaction_passwd}}
+'''
+
+import socket
+import xmlrpclib
+
+webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
+
def main():
    """Ensure the named mailbox exists (state=present) or is removed (state=absent)."""

    module = AnsibleModule(
        argument_spec=dict(
            mailbox_name=dict(required=True),
            # no_log keeps the passwords out of syslog / -v output.
            mailbox_password=dict(required=True, no_log=True),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            login_name=dict(required=True),
            login_password=dict(required=True, no_log=True),
        ),
        supports_check_mode=True
    )

    mailbox_name = module.params['mailbox_name']
    site_state = module.params['state']

    session_id, account = webfaction.login(
        module.params['login_name'],
        module.params['login_password']
    )

    mailbox_list = [x['mailbox'] for x in webfaction.list_mailboxes(session_id)]
    existing_mailbox = mailbox_name in mailbox_list

    result = {}

    # Here's where the real stuff happens

    if site_state == 'present':

        # Does a mailbox with this name already exist?
        if existing_mailbox:
            module.exit_json(changed=False,)

        positional_args = [session_id, mailbox_name]

        if not module.check_mode:
            # If this isn't a dry run, create the mailbox
            result.update(webfaction.create_mailbox(*positional_args))

    elif site_state == 'absent':

        # If the mailbox is already not there, nothing changed.
        if not existing_mailbox:
            module.exit_json(changed=False)

        if not module.check_mode:
            # If this isn't a dry run, delete the mailbox
            result.update(webfaction.delete_mailbox(session_id, mailbox_name))

    else:
        module.fail_json(msg="Unknown state specified: {}".format(site_state))

    module.exit_json(changed=True, result=result)
+
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/webfaction/webfaction_site.py b/lib/ansible/modules/cloud/webfaction/webfaction_site.py
new file mode 100644
index 0000000000..08a9b4d76d
--- /dev/null
+++ b/lib/ansible/modules/cloud/webfaction/webfaction_site.py
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+#
+# Create Webfaction website using Ansible and the Webfaction API
+#
+# ------------------------------------------
+#
+# (c) Quentin Stafford-Fraser 2015
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: webfaction_site
+short_description: Add or remove a website on a Webfaction host
+description:
+ - Add or remove a website on a Webfaction host. Further documentation at http://github.com/quentinsf/ansible-webfaction.
+author: Quentin Stafford-Fraser (@quentinsf)
+version_added: "2.0"
+notes:
+ - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name.
+ - If a site of the same name exists in the account but on a different host, the operation will exit.
+ - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+ - See `the webfaction API <http://docs.webfaction.com/xmlrpc-api/>`_ for more info.
+
+options:
+
+ name:
+ description:
+ - The name of the website
+ required: true
+
+ state:
+ description:
+ - Whether the website should exist
+ required: false
+ choices: ['present', 'absent']
+ default: "present"
+
+ host:
+ description:
+ - The webfaction host on which the site should be created.
+ required: true
+
+ https:
+ description:
+ - Whether or not to use HTTPS
+ required: false
+ choices:
+ - true
+ - false
+ default: 'false'
+
+ site_apps:
+ description:
+ - A mapping of URLs to apps
+ required: false
+
+ subdomains:
+ description:
+ - A list of subdomains associated with this site.
+ required: false
+ default: null
+
+ login_name:
+ description:
+ - The webfaction account to use
+ required: true
+
+ login_password:
+ description:
+ - The webfaction password to use
+ required: true
+'''
+
+EXAMPLES = '''
+ - name: create website
+ webfaction_site:
+ name: testsite1
+ state: present
+ host: myhost.webfaction.com
+ subdomains:
+ - 'testsite1.my_domain.org'
+ site_apps:
+ - ['testapp1', '/']
+ https: no
+ login_name: "{{webfaction_user}}"
+ login_password: "{{webfaction_passwd}}"
+'''
+
+import socket
+import xmlrpclib
+
+webfaction = xmlrpclib.ServerProxy('https://api.webfaction.com/')
+
def main():
    """Ensure the named website exists on the given host (state=present) or is removed."""

    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            # You can specify an IP address or hostname.
            host=dict(required=True),
            https=dict(required=False, type='bool', default=False),
            subdomains=dict(required=False, type='list', default=[]),
            site_apps=dict(required=False, type='list', default=[]),
            login_name=dict(required=True),
            # no_log keeps the password out of syslog / -v output.
            login_password=dict(required=True, no_log=True),
        ),
        supports_check_mode=True
    )
    site_name = module.params['name']
    site_state = module.params['state']
    site_host = module.params['host']
    # The webfaction API wants the host's IP address, so resolve the name here.
    site_ip = socket.gethostbyname(site_host)

    session_id, account = webfaction.login(
        module.params['login_name'],
        module.params['login_password']
    )

    site_list = webfaction.list_websites(session_id)
    site_map = dict([(i['name'], i) for i in site_list])
    existing_site = site_map.get(site_name)

    result = {}

    # Here's where the real stuff happens

    if site_state == 'present':

        # Does a site with this name already exist?
        if existing_site:

            # If yes, but it's on a different IP address, then fail.
            # If we wanted to allow relocation, we could add a 'relocate=true' option
            # which would get the existing IP address, delete the site there, and create it
            # at the new address. A bit dangerous, perhaps, so for now we'll require manual
            # deletion if it's on another host.

            if existing_site['ip'] != site_ip:
                module.fail_json(msg="Website already exists with a different IP address. Please fix by hand.")

            # If it's on this host and the key parameters are the same, nothing needs to be done.

            if (existing_site['https'] == module.boolean(module.params['https'])) and \
               (set(existing_site['subdomains']) == set(module.params['subdomains'])) and \
               (dict(existing_site['website_apps']) == dict(module.params['site_apps'])):
                module.exit_json(
                    changed=False
                )

        positional_args = [
            session_id, site_name, site_ip,
            module.boolean(module.params['https']),
            module.params['subdomains'],
        ]
        # The API takes each (app, mount-point) pair as a further positional argument.
        for a in module.params['site_apps']:
            positional_args.append((a[0], a[1]))

        if not module.check_mode:
            # If this isn't a dry run, create or modify the site
            result.update(
                webfaction.create_website(
                    *positional_args
                ) if not existing_site else webfaction.update_website(
                    *positional_args
                )
            )

    elif site_state == 'absent':

        # If the site's already not there, nothing changed.
        if not existing_site:
            module.exit_json(
                changed=False,
            )

        if not module.check_mode:
            # If this isn't a dry run, delete the site
            result.update(
                webfaction.delete_website(session_id, site_name, site_ip)
            )

    else:
        module.fail_json(msg="Unknown state specified: {}".format(site_state))

    module.exit_json(
        changed=True,
        result=result
    )
+
+
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()
diff --git a/lib/ansible/modules/cloud/xenserver_facts.py b/lib/ansible/modules/cloud/xenserver_facts.py
new file mode 100644
index 0000000000..d908e5a3fd
--- /dev/null
+++ b/lib/ansible/modules/cloud/xenserver_facts.py
@@ -0,0 +1,209 @@
+#!/usr/bin/python -tt
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+ANSIBLE_METADATA = {'status': ['preview'],
+ 'supported_by': 'community',
+ 'version': '1.0'}
+
+DOCUMENTATION = '''
+---
+module: xenserver_facts
+version_added: "2.0"
+short_description: get facts reported on xenserver
+description:
+ - Reads data out of XenAPI, can be used instead of multiple xe commands.
+author:
+ - Andy Hill (@andyhky)
+ - Tim Rupp
+'''
+
+import platform
+
+HAVE_XENAPI = False
+try:
+ import XenAPI
+ HAVE_XENAPI = True
+except ImportError:
+ pass
+
+EXAMPLES = '''
+- name: Gather facts from xenserver
+  xenserver_facts:
+
+- name: Print running VMs
+ debug: msg="{{ item }}"
+ with_items: "{{ xs_vms.keys() }}"
+ when: xs_vms[item]['power_state'] == "Running"
+
+TASK: [Print running VMs] ***********************************************************
+skipping: [10.13.0.22] => (item=CentOS 4.7 (32-bit))
+ok: [10.13.0.22] => (item=Control domain on host: 10.0.13.22) => {
+ "item": "Control domain on host: 10.0.13.22",
+ "msg": "Control domain on host: 10.0.13.22"
+}
+'''
+
class XenServerFacts:
    """Maps the local XenServer product version to its release codename."""

    def __init__(self):
        # Known XenServer releases, keyed by product version string.
        self.codes = {
            '5.5.0': 'george',
            '5.6.100': 'oxford',
            '6.0.0': 'boston',
            '6.1.0': 'tampa',
            '6.2.0': 'clearwater'
        }

    @property
    def version(self):
        """Distribution version string.

        Be aware! platform.dist() is deprecated as of Python 2.6.
        """
        return platform.dist()[1]

    @property
    def codename(self):
        """Release codename for this version, or None if the version is unknown."""
        return self.codes.get(self.version)
+
+
def get_xenapi_session():
    """Open and return a XenAPI session on the local host.

    Empty credentials are passed to login_with_password; presumably this
    relies on the local (unix-socket) connection being pre-authorised when
    running as root on the XenServer host itself — TODO confirm.
    """
    session = XenAPI.xapi_local()
    session.xenapi.login_with_password('', '')
    return session
+
+
def get_networks(session):
    """Return all network records, keyed by their name_label."""
    recs = session.xenapi.network.get_all_records()
    xs_networks = {}
    networks = change_keys(recs, key='uuid')
    # .values() (not the Python-2-only .itervalues()) works on Python 2 and 3.
    for network in networks.values():
        xs_networks[network['name_label']] = network
    return xs_networks
+
+
def get_pifs(session):
    """Return physical interface records, keyed by device name (ethN or bondN)."""
    recs = session.xenapi.PIF.get_all_records()
    pifs = change_keys(recs, key='uuid')
    xs_pifs = {}
    # NOTE(review): only eth0..eth6 (and bond0..bond6) are matched — confirm
    # this range is intended; hosts with more interfaces would be missed.
    devicenums = range(0, 7)
    # .values() (not the Python-2-only .itervalues()) works on Python 2 and 3.
    for pif in pifs.values():
        for eth in devicenums:
            interface_name = "eth%s" % (eth)
            bond_name = interface_name.replace('eth', 'bond')
            if pif['device'] == interface_name:
                xs_pifs[interface_name] = pif
            elif pif['device'] == bond_name:
                xs_pifs[bond_name] = pif
    return xs_pifs
+
+
def get_vlans(session):
    """Return all VLAN records, keyed by their VLAN tag."""
    vlan_records = session.xenapi.VLAN.get_all_records()
    return change_keys(vlan_records, key='tag')
+
+
def change_keys(recs, key='uuid', filter_func=None):
    """
    Take a xapi dict, and make the keys the value of recs[ref][key].

    Preserves the ref in rec['ref'] (the record dicts themselves are
    mutated to carry it).

    :param recs: mapping of opaque ref -> record dict
    :param key: record field whose value becomes the new dict key
    :param filter_func: optional predicate; records it rejects are skipped
    :return: new dict mapping rec[key] -> rec
    """
    new_recs = {}

    # .items() (not the Python-2-only .iteritems()) works on Python 2 and 3.
    for ref, rec in recs.items():
        if filter_func is not None and not filter_func(rec):
            continue

        new_recs[rec[key]] = rec
        new_recs[rec[key]]['ref'] = ref

    return new_recs
+
def get_host(session):
    """Get the host"""
    # Stand-alone installs have a single host entry; fetch and return it.
    host_refs = session.xenapi.host.get_all()
    return session.xenapi.host.get_record(host_refs[0])
+
def get_vms(session):
    """Return all VM records keyed by name_label, or None if there are none."""
    xs_vms = {}
    # VM.get_all() only returns opaque refs, but change_keys() requires the
    # ref -> record mapping (it iterates records), so fetch full records.
    recs = session.xenapi.VM.get_all_records()
    if not recs:
        return None

    vms = change_keys(recs, key='uuid')
    # .values() (not the Python-2-only .itervalues()) works on Python 2 and 3.
    for vm in vms.values():
        xs_vms[vm['name_label']] = vm
    return xs_vms
+
+
def get_srs(session):
    """Return all storage-repository records keyed by name_label, or None if there are none."""
    xs_srs = {}
    # SR.get_all() only returns opaque refs, but change_keys() requires the
    # ref -> record mapping (it iterates records), so fetch full records.
    recs = session.xenapi.SR.get_all_records()
    if not recs:
        return None
    srs = change_keys(recs, key='uuid')
    # .values() (not the Python-2-only .itervalues()) works on Python 2 and 3.
    for sr in srs.values():
        xs_srs[sr['name_label']] = sr
    return xs_srs
+
def main():
    """Gather facts from the local XenServer host and return them via exit_json."""
    # No arguments: this module is facts-only.
    module = AnsibleModule({})

    if not HAVE_XENAPI:
        module.fail_json(changed=False, msg="python xen api required for this module")

    obj = XenServerFacts()
    try:
        session = get_xenapi_session()
    except XenAPI.Failure as e:
        module.fail_json(msg='%s' % e)

    data = {
        'xenserver_version': obj.version,
        'xenserver_codename': obj.codename
    }

    xs_networks = get_networks(session)
    xs_pifs = get_pifs(session)
    xs_vlans = get_vlans(session)
    xs_vms = get_vms(session)
    xs_srs = get_srs(session)

    # Each fact set is included only when non-empty.
    if xs_vlans:
        data['xs_vlans'] = xs_vlans
    if xs_pifs:
        data['xs_pifs'] = xs_pifs
    if xs_networks:
        data['xs_networks'] = xs_networks

    if xs_vms:
        data['xs_vms'] = xs_vms

    if xs_srs:
        data['xs_srs'] = xs_srs

    module.exit_json(ansible=data)
+
+from ansible.module_utils.basic import *
+
+if __name__ == '__main__':
+ main()